pax_global_header00006660000000000000000000000064135607613040014517gustar00rootroot0000000000000052 comment=212a7620634eeaadb4916dc84cec00feaad60629 prometheus-pushgateway-1.0.0+ds/000077500000000000000000000000001356076130400166515ustar00rootroot00000000000000prometheus-pushgateway-1.0.0+ds/.circleci/000077500000000000000000000000001356076130400205045ustar00rootroot00000000000000prometheus-pushgateway-1.0.0+ds/.circleci/config.yml000066400000000000000000000017121356076130400224750ustar00rootroot00000000000000--- version: 2.1 orbs: prometheus: prometheus/prometheus@0.1.0 executors: # Whenever the Go version is updated here, .travis.yml and .promu.yml # should also be updated. golang: docker: - image: circleci/golang:1.13 jobs: test: executor: golang steps: - prometheus/setup_environment - run: make - prometheus/store_artifact: file: pushgateway workflows: version: 2 pushgateway: jobs: - test: filters: tags: only: /.*/ - prometheus/build: name: build filters: tags: only: /.*/ - prometheus/publish_master: requires: - test - build filters: branches: only: master - prometheus/publish_release: requires: - test - build filters: tags: only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/ branches: ignore: /.*/ prometheus-pushgateway-1.0.0+ds/.dockerignore000066400000000000000000000001211356076130400213170ustar00rootroot00000000000000.build/ .tarballs/ !.build/linux-amd64/ !.build/linux-armv7 !.build/linux-arm64 prometheus-pushgateway-1.0.0+ds/.github/000077500000000000000000000000001356076130400202115ustar00rootroot00000000000000prometheus-pushgateway-1.0.0+ds/.github/ISSUE_TEMPLATE.md000066400000000000000000000033031356076130400227150ustar00rootroot00000000000000 ## Feature request **Use case. Why is this important?** *“Nice to have” is not a good use case. :)* ## Bug Report **What did you do?** **What did you expect to see?** **What did you see instead? Under which circumstances?** **Environment** * System information: Insert output of `uname -srm` here. * Pushgateway version: Insert output of `pushgateway --version` here. * Pushgateway command line: Insert full command line. * Logs: ``` Insert Pushgateway logs relevant to the issue here. ``` prometheus-pushgateway-1.0.0+ds/.gitignore000066400000000000000000000001031356076130400206330ustar00rootroot00000000000000/pushgateway /.build /.release /.tarballs *.test *~ *.exe *.tar.gz prometheus-pushgateway-1.0.0+ds/.golangci.yml000066400000000000000000000002541356076130400212360ustar00rootroot00000000000000run: modules-download-mode: vendor # Run only staticcheck for now. Additional linters will be enabled one-by-one. linters: enable: - staticcheck disable-all: true prometheus-pushgateway-1.0.0+ds/.promu.yml000066400000000000000000000012431356076130400206140ustar00rootroot00000000000000go: # Whenever the Go version is updated here, .travis.yml and # .circle/config.yml should also be updated. 
version: 1.13 repository: path: github.com/prometheus/pushgateway build: flags: -mod=vendor -a -tags netgo ldflags: | -X github.com/prometheus/common/version.Version={{.Version}} -X github.com/prometheus/common/version.Revision={{.Revision}} -X github.com/prometheus/common/version.Branch={{.Branch}} -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}} -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} tarball: files: - LICENSE - NOTICE prometheus-pushgateway-1.0.0+ds/.travis.yml000066400000000000000000000000701356076130400207570ustar00rootroot00000000000000sudo: false language: go go: - 1.13.x script: - make prometheus-pushgateway-1.0.0+ds/CHANGELOG.md000066400000000000000000000140131356076130400204610ustar00rootroot00000000000000## 1.0.0 / 2019-10-15 _This release does not support the storage format of v0.5–v0.9 anymore. Only persistence files created by v0.10+ are usable. Upgrade to v0.10 first to convert existing persistence files._ [CHANGE] Remove code to convert the legacy v0.5–v0.9 storage format. ## 0.10.0 / 2019-10-10 _This release changes the storage format. v0.10 can read the storage format of v0.5–v0.9. It will then persist the new format so that a downgrade won't be possible anymore._ [CHANGE] Change of the storage format (necessary for the hash collision bugfix below). #293 [CHANGE] Check pushed metrics immediately and reject them if inconsistent. Successful pushes now result in code 200 (not 202). Failures result in code 400 and are logged at error level. #290 [FEATURE] Shutdown via HTTP request. Enable with `--web.enable-lifecycle`. #292 [FEATURE] Wipe storage completely via HTTP request and via web UI. Enable with `--web.enable-admin-api`. #287 #285 [BUGFIX] Rule out hash collisions between metric groups. #293 [BUGFIX] Avoid multiple calls of `http.Error` in push handler. #291 ## 0.9.1 / 2019-08-01 [BUGFIX] Make `--web.external-url` and `--web.route-prefix` work as documented. #274 ## 0.9.0 / 2019-07-23 [CHANGE] Web: Update to Bootstrap 4.3.1 and jquery 3.4.1, changing appearance of the web UI to be more in line with the Prometheus server. Also add favicon and remove timestamp column. #261 [CHANGE] Update logging to be in line with other Prometheus projects, using gokit and promlog. #263 [FEATURE] Add optional base64 encoding for label values in the grouping key. #268 [FEATURE] Add ARM container images. #265 [FEATURE] Log errors during scrapes. #267 [BUGFIX] Web: Fixed Content-Type for js and css instead of using /etc/mime.types. #252 ## 0.8.0 / 2019-04-13 _If you use the prebuilt Docker container or you build your own one based on the provided Dockerfile, note that this release changes the user to `nobody`. Should you use a persistence file, make sure it is readable and writable by user `nobody`._ * [CHANGE] Run as user `nobody` in Docker. #242 * [CHANGE] Adjust `--web.route-prefix` to work the same as in Prometheus. #190 * [FEATURE] Add `--web.external-url` flag (like in Prometheus). #190 ## 0.7.0 / 2018-12-07 _As preparation for the 1.0.0 release, this release removes the long deprecated legacy HTTP push endpoint (which uses `/jobs/` rather than `/job/` in the URL)._ * [CHANGE] Remove legacy push API. #227 * [ENHANCEMENT] Update dependencies. #230 * [ENHANCEMENT] Support Go modules. #221 * [BUGFIX] Avoid crash when started with v0.4 storage. #223 ## 0.6.0 / 2018-10-17 _Persistence storage prior to 0.5.0 is unsupported. 
Upgrade to 0.5.2 first for conversion._ * [CHANGE] Enforce consistency of help strings by changing them during exposition. (An INFO-level log message describes the change.) #194 * [CHANGE] Drop support of pre-0.5 storage format. * [CHANGE] Use prometheus/client_golang v0.9, which changes the `http_...` metrics. (See README.md for full documentation of exposed metrics.) ## 0.5.2 / 2018-06-15 * [BUGFIX] Update client_golang/prometheus vendoring to allow inconsistent labels. #185 ## 0.5.1 / 2018-05-30 * [BUGFIX] Fix conversion of old persistency format (0.4.0 and earlier). #179 * [BUGFIX] Make _Delete Group_ button work again. #177 * [BUGFIX] Don't display useless flags on status page. #176 ## 0.5.0 / 2018-05-23 Breaking change: * Flags now require double-dash. * The persistence storage format has been updated. Upgrade is transparent, but downgrade to 0.4.0 and prior is unsupported. * Persistence storage prior to 0.1.0 is unsupported. * [CHANGE] Replaced Flags with Kingpin #152 * [CHANGE] Slightly changed disk format for persistence. v0.5 can still read the pre-v0.5 format. #172 * [ENHANCEMENT] Debug level logging now shows client-induced errors #123 * [FEATURE] Add /-/ready and /-/healthy #135 * [FEATURE] Add web.route-prefix flag #146 * [BUGFIX] Fix incorrect persistence of certain values in a metric family. #172 ## 0.4.0 / 2017-06-09 * [CHANGE] Pushes with timestamps are now rejected. * [FEATURE] Added push_time_seconds metric to each push. * [ENHANCEMENT] Point at community page rather than the dev list in the UI. * [BUGFIX] Return HTTP 400 on parse error, rather than 500. ## 0.3.1 / 2016-11-03 * [BUGFIX] Fixed a race condition in the storage layer. * [ENHANCEMENT] Improved README.md. ## 0.3.0 / 2016-06-07 * [CHANGE] Push now rejects improper and reserved labels. * [CHANGE] Required labels flag removed. * [BUGFIX] Docker image actually works now. * [ENHANCEMENT] Converted to Promu build process. * [CHANGE] As a consequence of the above, changed dir structure in tar ball. * [ENHANCEMENT] Updated dependencies, with all the necessary code changes. * [ENHANCEMENT] Dependencies now vendored. * [ENHANCEMENT] `bindata.go` checked in, Pushgateway now `go get`-able. * [ENHANCEMENT] Various documentation improvements. * [CLEANUP] Various code cleanups. ## 0.2.0 / 2015-06-25 * [CHANGE] Support arbitrary grouping of metrics. * [CHANGE] Changed behavior of HTTP DELETE method (see README.md for details). ## 0.1.2 / 2015-06-08 * [CHANGE] Move pushgateway binary in archive from bin/ to /. * [CHANGE] Migrate logging to prometheus/log. ## 0.1.1 / 2015-05-05 * [BUGFIX] Properly display histograms in web status. * [BUGFIX] Fix value formatting. * [CHANGE] Make flag names consistent across projects. * [ENHANCEMENT] Auto-fill instance with IPv6 address. * [BUGFIX] Fix Go download link for several archs and OSes. * [BUGFIX] Use HTTPS and golang.org for Go download. * [BUGFIX] Re-add pprof endpoints. ## 0.1.0 / 2014-08-13 * [FEATURE] When being scraped, metrics of the same name but with different job/instance label are now merged into one metric family. * [FEATURE] Added Dockerfile. * [CHANGE] Default HTTP port now 9091. * [BUGFIX] Fixed parsing of content-type header. * [BUGFIX] Fixed race condition in handlers. * [PERFORMANCE] Replaced Martini with Httprouter. * [ENHANCEMENT] Migrated to new client_golang. * [ENHANCEMENT] Made internal metrics more consistent. * [ENHANCEMENT] Added http instrumentation. 
prometheus-pushgateway-1.0.0+ds/CONTRIBUTING.md000066400000000000000000000015461356076130400211100ustar00rootroot00000000000000# Contributing Prometheus uses GitHub to manage reviews of pull requests. * If you have a trivial fix or improvement, go ahead and create a pull request, addressing (with `@...`) the maintainer of this repository (see [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. * If you plan to do something more involved, first discuss your ideas on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). This will avoid unnecessary work and surely give you and us a good deal of inspiration. * Relevant coding style guidelines are the [Go Code Review Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) and the _Formatting and style_ section of Peter Bourgon's [Go: Best Practices for Production Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). prometheus-pushgateway-1.0.0+ds/Dockerfile000066400000000000000000000006311356076130400206430ustar00rootroot00000000000000ARG ARCH="amd64" ARG OS="linux" FROM quay.io/prometheus/busybox:latest LABEL maintainer="The Prometheus Authors " ARG ARCH="amd64" ARG OS="linux" COPY --chown=nobody:nogroup .build/${OS}-${ARCH}/pushgateway /bin/pushgateway EXPOSE 9091 RUN mkdir -p /pushgateway && chown nobody:nogroup /pushgateway WORKDIR /pushgateway USER 65534 ENTRYPOINT [ "/bin/pushgateway" ] prometheus-pushgateway-1.0.0+ds/LICENSE000066400000000000000000000261351356076130400176650ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. prometheus-pushgateway-1.0.0+ds/MAINTAINERS.md000066400000000000000000000000601356076130400207410ustar00rootroot00000000000000* Björn Rabenstein @beorn7 prometheus-pushgateway-1.0.0+ds/Makefile000066400000000000000000000015711356076130400203150ustar00rootroot00000000000000# Copyright 2016 The Prometheus Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Needs to be defined before including Makefile.common to auto-generate targets DOCKER_ARCHS ?= amd64 armv7 arm64 include Makefile.common DOCKER_IMAGE_NAME ?= pushgateway assets: @echo ">> writing assets" @cd $(PREFIX)/asset && GO111MODULE=$(GO111MODULE) $(GO) generate && $(GOFMT) -w assets_vfsdata.go prometheus-pushgateway-1.0.0+ds/Makefile.common000066400000000000000000000217771356076130400216160ustar00rootroot00000000000000# Copyright 2018 The Prometheus Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A common Makefile that includes rules to be reused in different prometheus projects. # !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! # Example usage : # Create the main Makefile in the root project directory. # include Makefile.common # customTarget: # @echo ">> Running customTarget" # # Ensure GOBIN is not set during build so that promu is installed to the correct path unexport GOBIN GO ?= go GOFMT ?= $(GO)fmt FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) GOOPTS ?= GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) GO_VERSION ?= $(shell $(GO) version) GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') GOVENDOR := GO111MODULE := ifeq (, $(PRE_GO_111)) ifneq (,$(wildcard go.mod)) # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). GO111MODULE := on ifneq (,$(wildcard vendor)) # Always use the local vendor/ directory to satisfy the dependencies. GOOPTS := $(GOOPTS) -mod=vendor endif endif else ifneq (,$(wildcard go.mod)) ifneq (,$(wildcard vendor)) $(warning This repository requires Go >= 1.11 because of Go modules) $(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') endif else # This repository isn't using Go modules (yet). GOVENDOR := $(FIRST_GOPATH)/bin/govendor endif endif PROMU := $(FIRST_GOPATH)/bin/promu pkgs = ./... ifeq (arm, $(GOHOSTARCH)) GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) else GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) endif PROMU_VERSION ?= 0.5.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= GOLANGCI_LINT_VERSION ?= v1.18.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. 
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint endif endif PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) DOCKERFILE_PATH ?= ./Dockerfile DOCKERBUILD_CONTEXT ?= ./ DOCKER_REPO ?= prom DOCKER_ARCHS ?= amd64 BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) ifeq ($(GOHOSTARCH),amd64) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) # Only supported on amd64 test-flags := -race endif endif # This rule is used to forward a target like "build" to "common-build". This # allows a new "build" target to be defined in a Makefile which includes this # one and override "common-build" without override warnings. %: common-% ; .PHONY: common-all common-all: precheck style check_license lint unused build test .PHONY: common-style common-style: @echo ">> checking code style" @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ if [ -n "$${fmtRes}" ]; then \ echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ echo "Please ensure you are using $$($(GO) version) for formatting code."; \ exit 1; \ fi .PHONY: common-check_license common-check_license: @echo ">> checking license header" @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ done); \ if [ -n "$${licRes}" ]; then \ echo "license header checking failed:"; echo "$${licRes}"; \ exit 1; \ fi .PHONY: common-deps common-deps: @echo ">> getting dependencies" ifdef GO111MODULE GO111MODULE=$(GO111MODULE) $(GO) mod download else $(GO) get $(GOOPTS) -t ./... endif .PHONY: common-test-short common-test-short: @echo ">> running short tests" GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs) .PHONY: common-test common-test: @echo ">> running all tests" GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs) .PHONY: common-format common-format: @echo ">> formatting code" GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) .PHONY: common-vet common-vet: @echo ">> vetting code" GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) .PHONY: common-lint common-lint: $(GOLANGCI_LINT) ifdef GOLANGCI_LINT @echo ">> running golangci-lint" ifdef GO111MODULE # 'go list' needs to be executed before staticcheck to prepopulate the modules cache. # Otherwise staticcheck might fail randomly for some reason not yet explained. GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) else $(GOLANGCI_LINT) run $(pkgs) endif endif # For backward-compatibility. .PHONY: common-staticcheck common-staticcheck: lint .PHONY: common-unused common-unused: $(GOVENDOR) ifdef GOVENDOR @echo ">> running check for unused packages" @$(GOVENDOR) list +unused | grep . 
&& exit 1 || echo 'No unused packages' else ifdef GO111MODULE @echo ">> running check for unused/missing packages in go.mod" GO111MODULE=$(GO111MODULE) $(GO) mod tidy ifeq (,$(wildcard vendor)) @git diff --exit-code -- go.sum go.mod else @echo ">> running check for unused packages in vendor/" GO111MODULE=$(GO111MODULE) $(GO) mod vendor @git diff --exit-code -- go.sum go.mod vendor/ endif endif endif .PHONY: common-build common-build: promu @echo ">> building binaries" GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) .PHONY: common-tarball common-tarball: promu @echo ">> building release tarball" $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ -f $(DOCKERFILE_PATH) \ --build-arg ARCH="$*" \ --build-arg OS="linux" \ $(DOCKERBUILD_CONTEXT) .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS) $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) common-docker-tag-latest: $(TAG_DOCKER_ARCHS) $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" .PHONY: common-docker-manifest common-docker-manifest: DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .PHONY: promu promu: $(PROMU) $(PROMU): $(eval PROMU_TMP := $(shell mktemp -d)) curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) mkdir -p $(FIRST_GOPATH)/bin cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu rm -r $(PROMU_TMP) .PHONY: proto proto: @echo ">> generating code from proto files" @./scripts/genproto.sh ifdef GOLANGCI_LINT $(GOLANGCI_LINT): mkdir -p $(FIRST_GOPATH)/bin curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ | sed -e '/install -d/d' \ | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) endif ifdef GOVENDOR .PHONY: $(GOVENDOR) $(GOVENDOR): GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor endif .PHONY: precheck precheck:: define PRECHECK_COMMAND_template = precheck:: $(1)_precheck PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) .PHONY: $(1)_precheck $(1)_precheck: @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ exit 1; \ fi endef prometheus-pushgateway-1.0.0+ds/NOTICE000066400000000000000000000007471356076130400175650ustar00rootroot00000000000000Pushgateway for ephemeral and batch jobs Copyright 2014-2015 The Prometheus Authors This product includes software developed at SoundCloud Ltd. (http://soundcloud.com/). The following components are included in this product: jQuery https://jquery.org Copyright jQuery Foundation and other contributors Licensed under the MIT License Bootstrap http://getbootstrap.com Copyright (c) 2011-2019 Twitter, Inc. 
Copyright (c) 2011-2019 The Bootstrap Authors Licensed under the MIT License prometheus-pushgateway-1.0.0+ds/README.md000066400000000000000000000506141356076130400201360ustar00rootroot00000000000000# Prometheus Pushgateway [![Build Status](https://travis-ci.org/prometheus/pushgateway.svg)][travis] [![CircleCI](https://circleci.com/gh/prometheus/pushgateway/tree/master.svg?style=shield)][circleci] [![Docker Repository on Quay](https://quay.io/repository/prometheus/pushgateway/status)][quay] [![Docker Pulls](https://img.shields.io/docker/pulls/prom/pushgateway.svg?maxAge=604800)][hub] The Prometheus Pushgateway exists to allow ephemeral and batch jobs to expose their metrics to Prometheus. Since these kinds of jobs may not exist long enough to be scraped, they can instead push their metrics to a Pushgateway. The Pushgateway then exposes these metrics to Prometheus. ## Non-goals The Pushgateway is explicitly not an _aggregator or distributed counter_ but rather a metrics cache. It does not have [statsd](https://github.com/etsy/statsd)-like semantics. The metrics pushed are exactly the same as you would present for scraping in a permanently running program. If you need distributed counting, you could either use the actual statsd in combination with the [Prometheus statsd exporter](https://github.com/prometheus/statsd_exporter), or have a look at [Weavework's aggregation gateway](https://github.com/weaveworks/prom-aggregation-gateway). With more experience gathered, the Prometheus project might one day be able to provide a native solution, separate from or possibly even as part of the Pushgateway. For machine-level metrics, the [textfile](https://github.com/prometheus/node_exporter/blob/master/README.md#textfile-collector) collector of the Node exporter is usually more appropriate. The Pushgateway is intended for service-level metrics. The Pushgateway is not an _event store_. While you can use Prometheus as a data source for [Grafana annotations](http://docs.grafana.org/reference/annotations/), tracking something like release events has to happen with some event-logging framework. A while ago, we [decided to not implement a “timeout” or TTL for pushed metrics](https://github.com/prometheus/pushgateway/issues/19) because almost all proposed use cases turned out to be anti-patterns we strongly discourage. You can follow a more recent discussion on the [prometheus-developers mailing list](https://groups.google.com/forum/#!topic/prometheus-developers/9IyUxRvhY7w). ## Run it Download binary releases for your platform from the [release page](https://github.com/prometheus/pushgateway/releases) and unpack the tarball. If you want to compile yourself from the sources, you need a working Go setup. Then use the provided Makefile (type `make`). For the most basic setup, just start the binary. To change the address to listen on, use the `--web.listen-address` flag (e.g. "0.0.0.0:9091" or ":9091"). By default, Pushgateway does not persist metrics. However, the `--persistence.file` flag allows you to specify a file in which the pushed metrics will be persisted (so that they survive restarts of the Pushgateway). ### Using Docker You can deploy the Pushgateway using the [prom/pushgateway](https://registry.hub.docker.com/u/prom/pushgateway/) Docker image. 
For example: ```bash docker pull prom/pushgateway docker run -d -p 9091:9091 prom/pushgateway ``` ## Use it ### Configure the Pushgateway as a target to scrape The Pushgateway has to be configured as a target to scrape by Prometheus, using one of the usual methods. _However, you should always set `honor_labels: true` in the scrape config_ (see [below](#about-the-job-and-instance-labels) for a detailed explanation). ### Libraries Prometheus client libraries should have a feature to push the registered metrics to a Pushgateway. Usually, a Prometheus client passively presents metric for scraping by a Prometheus server. A client library that supports pushing has a push function, which needs to be called by the client code. It will then actively push the metrics to a Pushgateway, using the API described below. ### Command line Using the Prometheus text protocol, pushing metrics is so easy that no separate CLI is provided. Simply use a command-line HTTP tool like `curl`. Your favorite scripting language has most likely some built-in HTTP capabilities you can leverage here as well. *Note that in the text protocol, each line has to end with a line-feed character (aka 'LF' or '\n'). Ending a line in other ways, e.g. with 'CR' aka '\r', 'CRLF' aka '\r\n', or just the end of the packet, will result in a protocol error.* Pushed metrics are managed in groups, identified by a grouping key of any number of labels, of which the first must be the `job` label. The groups are easy to inspect via the web interface. *For implications of special characters in label values see the [URL section](#url) below.* Examples: * Push a single sample into the group identified by `{job="some_job"}`: echo "some_metric 3.14" | curl --data-binary @- http://pushgateway.example.org:9091/metrics/job/some_job Since no type information has been provided, `some_metric` will be of type `untyped`. * Push something more complex into the group identified by `{job="some_job",instance="some_instance"}`: cat <1, you might be tempted to believe that Prometheus will scrape them with that same timestamp *t*1. Instead, what Prometheus attaches as a timestamp is the time when it scrapes the Pushgateway. Why so? In the world view of Prometheus, a metric can be scraped at any time. A metric that cannot be scraped has basically ceased to exist. Prometheus is somewhat tolerant, but if it cannot get any samples for a metric in 5min, it will behave as if that metric does not exist anymore. Preventing that is actually one of the reasons to use a Pushgateway. The Pushgateway will make the metrics of your ephemeral job scrapable at any time. Attaching the time of pushing as a timestamp would defeat that purpose because 5min after the last push, your metric will look as stale to Prometheus as if it could not be scraped at all anymore. (Prometheus knows only one timestamp per sample, there is no way to distinguish a 'time of pushing' and a 'time of scraping'.) As there aren't any use cases where it would make sense to to attach a different timestamp, and many users attempting to incorrectly do so (despite no client library supporting this), the Pushgateway rejects any pushes with timestamps. If you think you need to push a timestamp, please see [When To Use The Pushgateway](https://prometheus.io/docs/practices/pushing/). 
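For illustration, here is what such a rejected push looks like on the command line. This is only a sketch: the host name reuses the placeholder `pushgateway.example.org` from the other examples in this README, and the trailing integer is a millisecond timestamp in the text exposition format. Because pushes with timestamps are rejected, the Pushgateway answers with a 400 response instead of storing the sample.

```bash
# Pushing a sample that carries a timestamp; the Pushgateway rejects it with HTTP 400.
echo 'some_metric 3.14 1571299200000' | curl --data-binary @- http://pushgateway.example.org:9091/metrics/job/some_job
```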
In order to make it easier to alert on failed pushers or those that have not run recently, the Pushgateway will add in the metrics `push_time_seconds` and `push_failure_time_seconds` with the Unix timestamp of the last successful and failed `POST`/`PUT` to each group. This will override any pushed metric by that name. A value of zero for either metric implies that the group has never seen a successful or failed `POST`/`PUT`. ## API All pushes are done via HTTP. The interface is vaguely REST-like. ### URL The default port the push gateway is listening to is 9091. The path looks like /metrics/job/{//} `` is used as the value of the `job` label, followed by any number of other label pairs (which might or might not include an `instance` label). The label set defined by the URL path is used as a grouping key. Any of those labels already set in the body of the request (as regular labels, e.g. `name{job="foo"} 42`) _will be overwritten to match the labels defined by the URL path!_ If `job` or any label name is suffixed with `@base64`, the following job name or label value is interpreted as a base64 encoded string according to [RFC 4648, using the URL and filename safe alphabet](https://tools.ietf.org/html/rfc4648#section-5). (Padding is optional.) This is the only way of using job names or label values that contain a `/`. For other special characters, the usual URI component encoding works, too, but the base64 might be more convenient. Ideally, client libraries take care of the suffixing and encoding. Examples: * To use the grouping key `job="directory_cleaner",path="/var/tmp"`, the following path will _not_ work: /metrics/job/directory_cleaner/path//var/tmp Instead, use the base64 URL-safe encoding for the label value and mark it by suffixing the label name with `@base64`: /metrics/job/directory_cleaner/path@base64/L3Zhci90bXA If you are not using a client library that handles the encoding for you, you can use encoding tools. For example, there is a command line tool `base64url` (Debian package `basez`), which you could combine with `curl` to push from the command line in the following way: echo 'some_metric{foo="bar"} 3.14' | curl --data-binary @- http://pushgateway.example.org:9091/metrics/job/directory_cleaner/path@base64/$(echo -n '/var/tmp' | base64url) * The grouping key `job="titan",name="Προμηθεύς"` can be represented “traditionally” with URI encoding: /metrics/job/titan/name/%CE%A0%CF%81%CE%BF%CE%BC%CE%B7%CE%B8%CE%B5%CF%8D%CF%82 Or you can use the more compact base64 encoding: /metrics/job/titan/name@base64/zqDPgc6_zrzOt864zrXPjc-C ### `PUT` method `PUT` is used to push a group of metrics. All metrics with the grouping key specified in the URL are replaced by the metrics pushed with `PUT`. The body of the request contains the metrics to push either as delimited binary protocol buffers or in the simple flat text format (both in version 0.0.4, see the [data exposition format specification](https://docs.google.com/document/d/1ZjyKiKxZV83VI9ZKAXRGKaUKK2BIWCT7oiGBKDBpjEY/edit?usp=sharing)). Discrimination between the two variants is done via the `Content-Type` header. (Use the value `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited` for protocol buffers, otherwise the text format is tried as a fall-back.) The response code upon success is either 200 or 400. A 200 response implies a successful push, either replacing an existing group of metrics or creating a new one. 
A 400 response can happen if the request is malformed or if the pushed metrics are inconsistent with metrics pushed to other groups or collide with metrics of the Pushgateway itself. An explanation is returned in the body of the response and logged on error level. In rare cases, it is possible that the Pushgateway ends up with an inconsistent set of metrics already pushed. In that case, new pushes are also rejected as inconsistent even if the culprit is metrics that were pushed earlier. Delete the offending metrics to get out of that situation. _If using the protobuf format, do not send duplicate MetricFamily proto messages (i.e. more than one with the same name) in one push, as they will overwrite each other._ Note that the Pushgateway doesn't provide any strong guarantees that the pushed metrics are persisted to disk. (A server crash may cause data loss. Or the push gateway is configured to not persist to disk at all.) A `PUT` request with an empty body effectively deletes all metrics with the specified grouping key. However, in contrast to the [`DELETE` request](#delete-method) described below, it does update the `push_time_seconds` metrics. ### `POST` method `POST` works exactly like the `PUT` method but only metrics with the same name as the newly pushed metrics are replaced (among those with the same grouping key). A `POST` request with an empty body merely updates the `push_time_seconds` metrics but does not change any of the previously pushed metrics. ### `DELETE` method `DELETE` is used to delete metrics from the push gateway. The request must not contain any content. All metrics with the grouping key specified in the URL are deleted. The response code upon success is always 202. The delete request is merely queued at that moment. There is no guarantee that the request will actually be executed or that the result will make it to the persistence layer (e.g. in case of a server crash). However, the order of `PUT`/`POST` and `DELETE` request is guaranteed, i.e. if you have successfully sent a `DELETE` request and then send a `PUT`, it is guaranteed that the `DELETE` will be processed first (and vice versa). Deleting a grouping key without metrics is a no-op and will not result in an error. ## Admin API The Admin API provides administrative access to the Pushgateway, and must be explicitly enabled by setting `--web.enable-admin-api` flag. ### URL The default port the Pushgateway is listening to is 9091. The path looks like: /api//admin/ * Available endpoints: | HTTP_METHOD| API_VERSION | HANDLER | DESCRIPTION | | :-------: |:-------------:| :-----:| :----- | | PUT | v1 | wipe | Safely deletes all metrics from the Pushgateway. | * For example to wipe all metrics from the Pushgateway: curl -X PUT http://pushgateway.example.org:9091/api/v1/admin/wipe ## Management API The Pushgateway provides a set of management API to ease automation and integrations. * Available endpoints: | HTTP_METHOD | PATH | DESCRIPTION | | :-------: | :-----| :----- | | GET | /-/healthy | Returns 200 whenever the Pushgateway is healthy. | | GET | /-/ready | Returns 200 whenever the Pushgateway is ready to serve traffic. | * The following endpoint is disabled by default and can be enabled via the `--web.enable-lifecycle` flag. | HTTP_METHOD | PATH | DESCRIPTION | | :-------: | :-----| :----- | | PUT | /-/quit | Triggers a graceful shutdown of Pushgateway. | Alternatively, a graceful shutdown can be triggered by sending a `SIGTERM` to the Pushgateway process. 
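The endpoints above are plain HTTP and can be exercised with `curl`. A minimal sketch, using the same placeholder host name as the rest of this README; the `/-/quit` call only succeeds if the Pushgateway was started with `--web.enable-lifecycle`:

```bash
# Liveness and readiness probes return 200 when the Pushgateway is up and serving.
curl http://pushgateway.example.org:9091/-/healthy
curl http://pushgateway.example.org:9091/-/ready

# Trigger a graceful shutdown (requires --web.enable-lifecycle).
curl -X PUT http://pushgateway.example.org:9091/-/quit
```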
## Exposed metrics The Pushgateway exposes the following metrics via the configured `--web.telemetry-path` (default: `/metrics`): - The pushed metrics. - For each pushed group, a metric `push_time_seconds` and `push_failure_time_seconds` as explained above. - The usual metrics provided by the [Prometheus Go client library](https://github.com/prometheus/client_golang), i.e.: - `process_...` - `go_...` - `promhttp_metric_handler_requests_...` - A number of metrics specific to the Pushgateway, as documented by the example scrape below. ``` # HELP pushgateway_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which pushgateway was built. # TYPE pushgateway_build_info gauge pushgateway_build_info{branch="master",goversion="go1.10.2",revision="8f88ccb0343fc3382f6b93a9d258797dcb15f770",version="0.5.2"} 1 # HELP pushgateway_http_push_duration_seconds HTTP request duration for pushes to the Pushgateway. # TYPE pushgateway_http_push_duration_seconds summary pushgateway_http_push_duration_seconds{method="post",quantile="0.1"} 0.000116755 pushgateway_http_push_duration_seconds{method="post",quantile="0.5"} 0.000192608 pushgateway_http_push_duration_seconds{method="post",quantile="0.9"} 0.000327593 pushgateway_http_push_duration_seconds_sum{method="post"} 0.001622878 pushgateway_http_push_duration_seconds_count{method="post"} 8 # HELP pushgateway_http_push_size_bytes HTTP request size for pushes to the Pushgateway. # TYPE pushgateway_http_push_size_bytes summary pushgateway_http_push_size_bytes{method="post",quantile="0.1"} 166 pushgateway_http_push_size_bytes{method="post",quantile="0.5"} 182 pushgateway_http_push_size_bytes{method="post",quantile="0.9"} 196 pushgateway_http_push_size_bytes_sum{method="post"} 1450 pushgateway_http_push_size_bytes_count{method="post"} 8 # HELP pushgateway_http_requests_total Total HTTP requests processed by the Pushgateway, excluding scrapes. # TYPE pushgateway_http_requests_total counter pushgateway_http_requests_total{code="200",handler="static",method="get"} 5 pushgateway_http_requests_total{code="200",handler="status",method="get"} 8 pushgateway_http_requests_total{code="202",handler="delete",method="delete"} 1 pushgateway_http_requests_total{code="202",handler="push",method="post"} 6 pushgateway_http_requests_total{code="400",handler="push",method="post"} 2 ``` ### Alerting on failed pushes It is in general a good idea to alert on `push_time_seconds` being much farther behind than expected. This will catch both failed pushes as well as pushers being down completely. To detect failed pushes much earlier, alert on `push_failure_time_seconds > push_time_seconds`. Pushes can also fail because they are malformed. In this case, they never reach any metric group and therefore won't set any `push_failure_time_seconds` metrics. Those pushes are still counted as `pushgateway_http_requests_total{code="400",handler="push"}`. You can alert on the `rate` of this metric, but you have to inspect the logs to identify the offending pusher. ## Development The normal binary embeds the web files in the `resources` directory. For development purposes, it is handy to have a running binary use those files directly (so that you can see the effect of changes immediately). To switch to direct usage, add `-tags dev` to the `flags` entry in `.promu.yml`, and then `make build`. Switch back to "normal" mode by reverting the changes to `.promu.yml` and typing `make assets`. 
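A possible round trip for that workflow, assuming a standard Go toolchain and the Makefile targets shown earlier in this repository (the edit to `.promu.yml` itself is done by hand):

```bash
# 1. Edit .promu.yml and add "dev" to the -tags value in the build flags entry.
# 2. Rebuild; the binary now serves the web files directly from ./resources.
make build

# 3. When done, revert .promu.yml and regenerate the embedded assets.
make assets
make build
```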
## Contributing Relevant style guidelines are the [Go Code Review Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) and the _Formatting and style_ section of Peter Bourgon's [Go: Best Practices for Production Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). [travis]: https://travis-ci.org/prometheus/pushgateway [hub]: https://hub.docker.com/r/prom/pushgateway/ [circleci]: https://circleci.com/gh/prometheus/pushgateway [quay]: https://quay.io/repository/prometheus/pushgateway prometheus-pushgateway-1.0.0+ds/VERSION000066400000000000000000000000061356076130400177150ustar00rootroot000000000000001.0.0 prometheus-pushgateway-1.0.0+ds/asset/000077500000000000000000000000001356076130400177705ustar00rootroot00000000000000prometheus-pushgateway-1.0.0+ds/asset/asset.go000066400000000000000000000013461356076130400214420ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build dev package asset import ( "net/http" ) // Assets contains the project's assets. var Assets http.FileSystem = http.Dir("../resources") prometheus-pushgateway-1.0.0+ds/asset/asset_generate.go000066400000000000000000000016121356076130400233100ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build ignore package main import ( "log" "github.com/prometheus/pushgateway/asset" "github.com/shurcooL/vfsgen" ) func main() { err := vfsgen.Generate(asset.Assets, vfsgen.Options{ PackageName: "asset", BuildTags: "!dev", VariableName: "Assets", }) if err != nil { log.Fatalln(err) } } prometheus-pushgateway-1.0.0+ds/asset/doc.go000066400000000000000000000014541356076130400210700ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package asset provides the assets via a virtual filesystem. package asset import ( // The blank import is to make govendor happy. 
_ "github.com/shurcooL/vfsgen" ) //go:generate go run -tags=dev asset_generate.go prometheus-pushgateway-1.0.0+ds/go.mod000066400000000000000000000012751356076130400177640ustar00rootroot00000000000000module github.com/prometheus/pushgateway require ( github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect github.com/go-kit/kit v0.9.0 github.com/golang/protobuf v1.3.2 github.com/julienschmidt/httprouter v1.3.0 github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/prometheus/client_golang v1.2.0 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 github.com/prometheus/common v0.7.0 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 // indirect github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd golang.org/x/tools v0.0.0-20190919031856-7460b8e10b7e // indirect gopkg.in/alecthomas/kingpin.v2 v2.2.6 ) go 1.11 prometheus-pushgateway-1.0.0+ds/go.sum000066400000000000000000000310031356076130400200010ustar00rootroot00000000000000github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic= 
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.2.0 h1:g4yo/h/me4ZL9o0SVHNRdS2jn5SY8GDmMgkhQ8Mz70s= github.com/prometheus/client_golang v1.2.0/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20190919031856-7460b8e10b7e h1:DxffoHYXmce3WTEBU/6/5bBSV7wmPSvT+atzBfv8hJI= golang.org/x/tools v0.0.0-20190919031856-7460b8e10b7e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= prometheus-pushgateway-1.0.0+ds/handler/000077500000000000000000000000001356076130400202665ustar00rootroot00000000000000prometheus-pushgateway-1.0.0+ds/handler/delete.go000066400000000000000000000050401356076130400220560ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, 
Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package handler import ( "fmt" "net/http" "sync" "time" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/julienschmidt/httprouter" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/pushgateway/storage" ) // Delete returns a handler that accepts delete requests. // // The returned handler is already instrumented for Prometheus. func Delete(ms storage.MetricStore, jobBase64Encoded bool, logger log.Logger) func(http.ResponseWriter, *http.Request, httprouter.Params) { var ps httprouter.Params var mtx sync.Mutex // Protects ps. instrumentedHandler := promhttp.InstrumentHandlerCounter( httpCnt.MustCurryWith(prometheus.Labels{"handler": "delete"}), http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { job := ps.ByName("job") if jobBase64Encoded { var err error if job, err = decodeBase64(job); err != nil { http.Error(w, fmt.Sprintf("invalid base64 encoding in job name %q: %v", ps.ByName("job"), err), http.StatusBadRequest) level.Debug(logger).Log("msg", "invalid base64 encoding in job name", "job", ps.ByName("job"), "err", err.Error()) return } } labelsString := ps.ByName("labels") mtx.Unlock() labels, err := splitLabels(labelsString) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) level.Debug(logger).Log("msg", "failed to parse URL", "url", labelsString, "err", err.Error()) return } if job == "" { http.Error(w, "job name is required", http.StatusBadRequest) level.Debug(logger).Log("msg", "job name is required") return } labels["job"] = job ms.SubmitWriteRequest(storage.WriteRequest{ Labels: labels, Timestamp: time.Now(), }) w.WriteHeader(http.StatusAccepted) }), ) return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) { mtx.Lock() ps = params instrumentedHandler.ServeHTTP(w, r) } } prometheus-pushgateway-1.0.0+ds/handler/handler_test.go000066400000000000000000000525371356076130400233050ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package handler import ( "bytes" "errors" "fmt" "net/http" "net/http/httptest" "testing" "time" "github.com/go-kit/kit/log" "github.com/golang/protobuf/proto" "github.com/julienschmidt/httprouter" "github.com/matttproud/golang_protobuf_extensions/pbutil" dto "github.com/prometheus/client_model/go" "github.com/prometheus/pushgateway/storage" ) var logger = log.NewNopLogger() // MockMetricStore isn't doing any of the validation and sanitation a real // metric store implementation has to do. Those are tested in the storage // package. Here we only ensure that the right method calls are performed // by the code in the handlers. type MockMetricStore struct { lastWriteRequest storage.WriteRequest metricGroups storage.GroupingKeyToMetricGroup writeRequests []storage.WriteRequest err error // If non-nil, will be sent to Done channel in request. } func (m *MockMetricStore) SubmitWriteRequest(req storage.WriteRequest) { m.writeRequests = append(m.writeRequests, req) m.lastWriteRequest = req if req.Done != nil { if m.err != nil { req.Done <- m.err } close(req.Done) } } func (m *MockMetricStore) GetMetricFamilies() []*dto.MetricFamily { panic("not implemented") } func (m *MockMetricStore) GetMetricFamiliesMap() storage.GroupingKeyToMetricGroup { return m.metricGroups } func (m *MockMetricStore) Shutdown() error { return nil } func (m *MockMetricStore) Healthy() error { return nil } func (m *MockMetricStore) Ready() error { return nil } func TestHealthyReady(t *testing.T) { mms := MockMetricStore{} req, err := http.NewRequest("GET", "http://example.org/", &bytes.Buffer{}) if err != nil { t.Fatal(err) } healthyHandler := Healthy(&mms) readyHandler := Ready(&mms) w := httptest.NewRecorder() healthyHandler.ServeHTTP(w, req) if expected, got := http.StatusOK, w.Code; expected != got { t.Errorf("Wanted status code %v, got %v.", expected, got) } readyHandler.ServeHTTP(w, req) if expected, got := http.StatusOK, w.Code; expected != got { t.Errorf("Wanted status code %v, got %v.", expected, got) } } func TestPush(t *testing.T) { mms := MockMetricStore{} mmsWithErr := MockMetricStore{err: errors.New("testerror")} handler := Push(&mms, false, false, logger) handlerWithErr := Push(&mmsWithErr, false, false, logger) handlerBase64 := Push(&mms, false, true, logger) req, err := http.NewRequest("POST", "http://example.org/", &bytes.Buffer{}) if err != nil { t.Fatal(err) } // No job name. w := httptest.NewRecorder() handler(w, req, httprouter.Params{}) if expected, got := http.StatusBadRequest, w.Code; expected != got { t.Errorf("Wanted status code %v, got %v.", expected, got) } if !mms.lastWriteRequest.Timestamp.IsZero() { t.Errorf("Write request timestamp unexpectedly set: %#v", mms.lastWriteRequest) } // With job name, but no instance name and no content. mms.lastWriteRequest = storage.WriteRequest{} w = httptest.NewRecorder() handler(w, req, httprouter.Params{httprouter.Param{Key: "job", Value: "testjob"}}) if expected, got := http.StatusOK, w.Code; expected != got { t.Errorf("Wanted status code %v, got %v.", expected, got) } if mms.lastWriteRequest.Timestamp.IsZero() { t.Errorf("Write request timestamp not set: %#v", mms.lastWriteRequest) } if expected, got := "testjob", mms.lastWriteRequest.Labels["job"]; expected != got { t.Errorf("Wanted job %v, got %v.", expected, got) } if expected, got := "", mms.lastWriteRequest.Labels["instance"]; expected != got { t.Errorf("Wanted instance %v, got %v.", expected, got) } // With job name and instance name and invalid text content. 
mms.lastWriteRequest = storage.WriteRequest{} req, err = http.NewRequest( "POST", "http://example.org/", bytes.NewBufferString("blablabla\n"), ) if err != nil { t.Fatal(err) } w = httptest.NewRecorder() handler( w, req, httprouter.Params{ httprouter.Param{Key: "job", Value: "testjob"}, httprouter.Param{Key: "instance", Value: "testinstance"}, }, ) if expected, got := http.StatusBadRequest, w.Code; expected != got { t.Errorf("Wanted status code %v, got %v.", expected, got) } if !mms.lastWriteRequest.Timestamp.IsZero() { t.Errorf("Write request timestamp unexpectedly set: %#v", mms.lastWriteRequest) } // With job name and instance name and text content. mms.lastWriteRequest = storage.WriteRequest{} req, err = http.NewRequest( "POST", "http://example.org/", bytes.NewBufferString("some_metric 3.14\nanother_metric{instance=\"testinstance\",job=\"testjob\"} 42\n"), ) if err != nil { t.Fatal(err) } w = httptest.NewRecorder() handler( w, req, httprouter.Params{ httprouter.Param{Key: "job", Value: "testjob"}, httprouter.Param{Key: "labels", Value: "/instance/testinstance"}, }, ) if expected, got := http.StatusOK, w.Code; expected != got { t.Errorf("Wanted status code %v, got %v.", expected, got) } if mms.lastWriteRequest.Timestamp.IsZero() { t.Errorf("Write request timestamp not set: %#v", mms.lastWriteRequest) } if expected, got := "testjob", mms.lastWriteRequest.Labels["job"]; expected != got { t.Errorf("Wanted job %v, got %v.", expected, got) } if expected, got := "testinstance", mms.lastWriteRequest.Labels["instance"]; expected != got { t.Errorf("Wanted instance %v, got %v.", expected, got) } // Note that sanitation hasn't happened yet, grouping labels not in request. if expected, got := `name:"some_metric" type:UNTYPED metric: > `, mms.lastWriteRequest.MetricFamilies["some_metric"].String(); expected != got { t.Errorf("Wanted metric family %v, got %v.", expected, got) } if expected, got := `name:"another_metric" type:UNTYPED metric: label: untyped: > `, mms.lastWriteRequest.MetricFamilies["another_metric"].String(); expected != got { t.Errorf("Wanted metric family %v, got %v.", expected, got) } // With job name and instance name and text content, storage returns error. req, err = http.NewRequest( "POST", "http://example.org/", bytes.NewBufferString("some_metric 3.14\nanother_metric{instance=\"testinstance\",job=\"testjob\"} 42\n"), ) if err != nil { t.Fatal(err) } w = httptest.NewRecorder() handlerWithErr( w, req, httprouter.Params{ httprouter.Param{Key: "job", Value: "testjob"}, httprouter.Param{Key: "labels", Value: "/instance/testinstance"}, }, ) if expected, got := http.StatusBadRequest, w.Code; expected != got { t.Errorf("Wanted status code %v, got %v.", expected, got) } if mmsWithErr.lastWriteRequest.Timestamp.IsZero() { t.Errorf("Write request timestamp not set: %#v", mmsWithErr.lastWriteRequest) } if expected, got := "testjob", mmsWithErr.lastWriteRequest.Labels["job"]; expected != got { t.Errorf("Wanted job %v, got %v.", expected, got) } if expected, got := "testinstance", mmsWithErr.lastWriteRequest.Labels["instance"]; expected != got { t.Errorf("Wanted instance %v, got %v.", expected, got) } // Note that sanitation hasn't happened yet, grouping labels not in request. 
if expected, got := `name:"some_metric" type:UNTYPED metric: > `, mmsWithErr.lastWriteRequest.MetricFamilies["some_metric"].String(); expected != got { t.Errorf("Wanted metric family %v, got %v.", expected, got) } if expected, got := `name:"another_metric" type:UNTYPED metric: label: untyped: > `, mmsWithErr.lastWriteRequest.MetricFamilies["another_metric"].String(); expected != got { t.Errorf("Wanted metric family %v, got %v.", expected, got) } // With base64-encoded job name and instance name and text content. mms.lastWriteRequest = storage.WriteRequest{} req, err = http.NewRequest( "POST", "http://example.org/", bytes.NewBufferString("some_metric 3.14\nanother_metric{instance=\"testinstance\",job=\"testjob\"} 42\n"), ) if err != nil { t.Fatal(err) } w = httptest.NewRecorder() handlerBase64( w, req, httprouter.Params{ httprouter.Param{Key: "job", Value: "dGVzdC9qb2I="}, // job="test/job" httprouter.Param{Key: "labels", Value: "/instance@base64/dGVzdGluc3RhbmNl"}, // instance="testinstance" }, ) if expected, got := http.StatusOK, w.Code; expected != got { t.Errorf("Wanted status code %v, got %v.", expected, got) } if mms.lastWriteRequest.Timestamp.IsZero() { t.Errorf("Write request timestamp not set: %#v", mms.lastWriteRequest) } if expected, got := "test/job", mms.lastWriteRequest.Labels["job"]; expected != got { t.Errorf("Wanted job %v, got %v.", expected, got) } if expected, got := "testinstance", mms.lastWriteRequest.Labels["instance"]; expected != got { t.Errorf("Wanted instance %v, got %v.", expected, got) } // Note that sanitation hasn't happened yet, grouping labels not in request. if expected, got := `name:"some_metric" type:UNTYPED metric: > `, mms.lastWriteRequest.MetricFamilies["some_metric"].String(); expected != got { t.Errorf("Wanted metric family %v, got %v.", expected, got) } // Note that sanitation hasn't happened yet, job label as still as in the push, not aligned to grouping labels. if expected, got := `name:"another_metric" type:UNTYPED metric: label: untyped: > `, mms.lastWriteRequest.MetricFamilies["another_metric"].String(); expected != got { t.Errorf("Wanted metric family %v, got %v.", expected, got) } // With job name and no instance name and text content. mms.lastWriteRequest = storage.WriteRequest{} req, err = http.NewRequest( "POST", "http://example.org/", bytes.NewBufferString("some_metric 3.14\nanother_metric{instance=\"testinstance\",job=\"testjob\"} 42\n"), ) if err != nil { t.Fatal(err) } w = httptest.NewRecorder() handler( w, req, httprouter.Params{ httprouter.Param{Key: "job", Value: "testjob"}, }, ) if expected, got := http.StatusOK, w.Code; expected != got { t.Errorf("Wanted status code %v, got %v.", expected, got) } if mms.lastWriteRequest.Timestamp.IsZero() { t.Errorf("Write request timestamp not set: %#v", mms.lastWriteRequest) } if expected, got := "testjob", mms.lastWriteRequest.Labels["job"]; expected != got { t.Errorf("Wanted job %v, got %v.", expected, got) } if expected, got := "", mms.lastWriteRequest.Labels["instance"]; expected != got { t.Errorf("Wanted instance %v, got %v.", expected, got) } // Note that sanitation hasn't happened yet, grouping labels not in request. 
if expected, got := `name:"some_metric" type:UNTYPED metric: > `, mms.lastWriteRequest.MetricFamilies["some_metric"].String(); expected != got { t.Errorf("Wanted metric family %v, got %v.", expected, got) } if expected, got := `name:"another_metric" type:UNTYPED metric: label: untyped: > `, mms.lastWriteRequest.MetricFamilies["another_metric"].String(); expected != got { t.Errorf("Wanted metric family %v, got %v.", expected, got) } // With job name and instance name and timestamp specified. mms.lastWriteRequest = storage.WriteRequest{} req, err = http.NewRequest( "POST", "http://example.org/", bytes.NewBufferString("a 1\nb 1 1000\n"), ) if err != nil { t.Fatal(err) } w = httptest.NewRecorder() handler( w, req, httprouter.Params{ httprouter.Param{Key: "job", Value: "testjob"}, httprouter.Param{Key: "labels", Value: "/instance/testinstance"}, }, ) // Note that a real storage shourd reject pushes with timestamps. Here // we only make sure it gets through. Rejection is tested in the storage // package. if expected, got := http.StatusOK, w.Code; expected != got { t.Errorf("Wanted status code %v, got %v.", expected, got) } // Make sure the timestamp from the push didn't make it to the WriteRequest. if time.Now().Sub(mms.lastWriteRequest.Timestamp) > time.Minute { t.Errorf("Write request timestamp set to a too low value: %#v", mms.lastWriteRequest) } if expected, got := int64(1000), mms.lastWriteRequest.MetricFamilies["b"].GetMetric()[0].GetTimestampMs(); expected != got { t.Errorf("Wanted protobuf timestamp %v, got %v.", expected, got) } // With job name and instance name and protobuf content. mms.lastWriteRequest = storage.WriteRequest{} buf := &bytes.Buffer{} _, err = pbutil.WriteDelimited(buf, &dto.MetricFamily{ Name: proto.String("some_metric"), Type: dto.MetricType_UNTYPED.Enum(), Metric: []*dto.Metric{ { Untyped: &dto.Untyped{ Value: proto.Float64(1.234), }, }, }, }) if err != nil { t.Fatal(err) } _, err = pbutil.WriteDelimited(buf, &dto.MetricFamily{ Name: proto.String("another_metric"), Type: dto.MetricType_UNTYPED.Enum(), Metric: []*dto.Metric{ { Untyped: &dto.Untyped{ Value: proto.Float64(3.14), }, }, }, }) if err != nil { t.Fatal(err) } req, err = http.NewRequest( "POST", "http://example.org/", buf, ) if err != nil { t.Fatal(err) } req.Header.Set("Content-Type", "application/vnd.google.protobuf; encoding=delimited; proto=io.prometheus.client.MetricFamily") w = httptest.NewRecorder() handler( w, req, httprouter.Params{ httprouter.Param{Key: "job", Value: "testjob"}, httprouter.Param{Key: "labels", Value: "/instance/testinstance"}, }, ) if expected, got := http.StatusOK, w.Code; expected != got { t.Errorf("Wanted status code %v, got %v.", expected, got) } if mms.lastWriteRequest.Timestamp.IsZero() { t.Errorf("Write request timestamp not set: %#v", mms.lastWriteRequest) } if expected, got := "testjob", mms.lastWriteRequest.Labels["job"]; expected != got { t.Errorf("Wanted job %v, got %v.", expected, got) } if expected, got := "testinstance", mms.lastWriteRequest.Labels["instance"]; expected != got { t.Errorf("Wanted instance %v, got %v.", expected, got) } // Note that sanitation hasn't happened yet, grouping labels not in request. if expected, got := `name:"some_metric" type:UNTYPED metric: > `, mms.lastWriteRequest.MetricFamilies["some_metric"].String(); expected != got { t.Errorf("Wanted metric family %v, got %v.", expected, got) } // Note that sanitation hasn't happened yet, grouping labels not in request. 
if expected, got := `name:"another_metric" type:UNTYPED metric: > `, mms.lastWriteRequest.MetricFamilies["another_metric"].String(); expected != got { t.Errorf("Wanted metric family %v, got %v.", expected, got) } } func TestDelete(t *testing.T) { mms := MockMetricStore{} handler := Delete(&mms, false, logger) handlerBase64 := Delete(&mms, true, logger) // No job name. mms.lastWriteRequest = storage.WriteRequest{} w := httptest.NewRecorder() handler( w, &http.Request{}, httprouter.Params{}, ) if expected, got := http.StatusBadRequest, w.Code; expected != got { t.Errorf("Wanted status code %v, got %v.", expected, got) } if !mms.lastWriteRequest.Timestamp.IsZero() { t.Errorf("Write request timestamp unexpectedly set: %#v", mms.lastWriteRequest) } // With job name, but no instance name. mms.lastWriteRequest = storage.WriteRequest{} w = httptest.NewRecorder() handler( w, &http.Request{}, httprouter.Params{ httprouter.Param{Key: "job", Value: "testjob"}, }, ) if expected, got := http.StatusAccepted, w.Code; expected != got { t.Errorf("Wanted status code %v, got %v.", expected, got) } if mms.lastWriteRequest.Timestamp.IsZero() { t.Errorf("Write request timestamp not set: %#v", mms.lastWriteRequest) } if expected, got := "testjob", mms.lastWriteRequest.Labels["job"]; expected != got { t.Errorf("Wanted job %v, got %v.", expected, got) } if expected, got := "", mms.lastWriteRequest.Labels["instance"]; expected != got { t.Errorf("Wanted instance %v, got %v.", expected, got) } // With job name and instance name. mms.lastWriteRequest = storage.WriteRequest{} w = httptest.NewRecorder() handler( w, &http.Request{}, httprouter.Params{ httprouter.Param{Key: "job", Value: "testjob"}, httprouter.Param{Key: "labels", Value: "/instance/testinstance"}, }, ) if expected, got := http.StatusAccepted, w.Code; expected != got { t.Errorf("Wanted status code %v, got %v.", expected, got) } if mms.lastWriteRequest.Timestamp.IsZero() { t.Errorf("Write request timestamp not set: %#v", mms.lastWriteRequest) } if expected, got := "testjob", mms.lastWriteRequest.Labels["job"]; expected != got { t.Errorf("Wanted job %v, got %v.", expected, got) } if expected, got := "testinstance", mms.lastWriteRequest.Labels["instance"]; expected != got { t.Errorf("Wanted instance %v, got %v.", expected, got) } // With base64-encoded job name and instance name. 
mms.lastWriteRequest = storage.WriteRequest{} w = httptest.NewRecorder() handlerBase64( w, &http.Request{}, httprouter.Params{ httprouter.Param{Key: "job", Value: "dGVzdC9qb2I="}, // job="test/job" httprouter.Param{Key: "labels", Value: "/instance@base64/dGVzdGluc3RhbmNl"}, // instance="testinstance" }, ) if expected, got := http.StatusAccepted, w.Code; expected != got { t.Errorf("Wanted status code %v, got %v.", expected, got) } if mms.lastWriteRequest.Timestamp.IsZero() { t.Errorf("Write request timestamp not set: %#v", mms.lastWriteRequest) } if expected, got := "test/job", mms.lastWriteRequest.Labels["job"]; expected != got { t.Errorf("Wanted job %v, got %v.", expected, got) } if expected, got := "testinstance", mms.lastWriteRequest.Labels["instance"]; expected != got { t.Errorf("Wanted instance %v, got %v.", expected, got) } } func TestSplitLabels(t *testing.T) { scenarios := map[string]struct { input string expectError bool expectedOutput map[string]string }{ "regular labels": { input: "/label_name1/label_value1/label_name2/label_value2", expectedOutput: map[string]string{ "label_name1": "label_value1", "label_name2": "label_value2", }, }, "invalid label name": { input: "/label_name1/label_value1/a=b/label_value2", expectError: true, }, "reserved label name": { input: "/label_name1/label_value1/__label_name2/label_value2", expectError: true, }, "unencoded slash in label value": { input: "/label_name1/label_value1/label_name2/label/value2", expectError: true, }, "encoded slash in first label value ": { input: "/label_name1@base64/bGFiZWwvdmFsdWUx/label_name2/label_value2", expectedOutput: map[string]string{ "label_name1": "label/value1", "label_name2": "label_value2", }, }, "encoded slash in last label value": { input: "/label_name1/label_value1/label_name2@base64/bGFiZWwvdmFsdWUy", expectedOutput: map[string]string{ "label_name1": "label_value1", "label_name2": "label/value2", }, }, "encoded slash in last label value with padding": { input: "/label_name1/label_value1/label_name2@base64/bGFiZWwvdmFsdWUy==", expectedOutput: map[string]string{ "label_name1": "label_value1", "label_name2": "label/value2", }, }, "invalid base64 encoding": { input: "/label_name1@base64/foo.bar/label_name2/label_value2", expectError: true, }, } for name, scenario := range scenarios { t.Run(name, func(t *testing.T) { parsed, err := splitLabels(scenario.input) if err != nil { if scenario.expectError { return // All good. } t.Fatalf("Got unexpected error: %s.", err) } for k, v := range scenario.expectedOutput { got, ok := parsed[k] if !ok { t.Errorf("Expected to find %s=%q.", k, v) } if got != v { t.Errorf("Expected %s=%q but got %s=%q.", k, v, k, got) } delete(parsed, k) } for k, v := range parsed { t.Errorf("Found unexpected label %s=%q.", k, v) } }) } } func TestWipeMetricStore(t *testing.T) { // Create MockMetricStore with a few GroupingKeyToMetricGroup metrics // so they can be returned by GetMetricFamiliesMap() to later send write // requests for each of them. metricCount := 5 mgs := storage.GroupingKeyToMetricGroup{} for i := 0; i < metricCount; i++ { mgs[fmt.Sprint(i)] = storage.MetricGroup{} } mms := MockMetricStore{metricGroups: mgs} // Wipe handler should return 202 and delete all metrics. wipeHandler := WipeMetricStore(&mms, logger) w := httptest.NewRecorder() // Then handler is routed to the handler based on verb and path in main.go // therefore (and for now) we use the request to only record the returned status code. 
req, err := http.NewRequest("PUT", "http://example.org", &bytes.Buffer{}) if err != nil { t.Fatal(err) } wipeHandler.ServeHTTP(w, req) if w.Code != http.StatusAccepted { t.Errorf("status code should be %d", http.StatusAccepted) } if len(mms.writeRequests) != metricCount { t.Errorf("there should be %d write requests, got %d instead", metricCount, len(mms.writeRequests)) } // Were all the writeRequest deletes?. for i, wr := range mms.writeRequests { if wr.MetricFamilies != nil { t.Errorf("writeRequest at index %d was not a delete request", i) } } } prometheus-pushgateway-1.0.0+ds/handler/metrics.go000066400000000000000000000030221356076130400222600ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package handler import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" ) var ( httpCnt = promauto.NewCounterVec( prometheus.CounterOpts{ Name: "pushgateway_http_requests_total", Help: "Total HTTP requests processed by the Pushgateway, excluding scrapes.", }, []string{"handler", "code", "method"}, ) httpPushSize = promauto.NewSummaryVec( prometheus.SummaryOpts{ Name: "pushgateway_http_push_size_bytes", Help: "HTTP request size for pushes to the Pushgateway.", Objectives: map[float64]float64{0.1: 0.01, 0.5: 0.05, 0.9: 0.01}, }, []string{"method"}, ) httpPushDuration = promauto.NewSummaryVec( prometheus.SummaryOpts{ Name: "pushgateway_http_push_duration_seconds", Help: "HTTP request duration for pushes to the Pushgateway.", Objectives: map[float64]float64{0.1: 0.01, 0.5: 0.05, 0.9: 0.01}, }, []string{"method"}, ) ) prometheus-pushgateway-1.0.0+ds/handler/misc.go000066400000000000000000000047451356076130400215620ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package handler import ( "io" "net/http" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/server" "github.com/prometheus/pushgateway/storage" ) // Healthy is used to report the health of the Pushgateway. It currently only // uses the Healthy method of the MetricScore to detect healthy state. // // The returned handler is already instrumented for Prometheus. 
func Healthy(ms storage.MetricStore) http.Handler { return promhttp.InstrumentHandlerCounter( httpCnt.MustCurryWith(prometheus.Labels{"handler": "healthy"}), http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { err := ms.Healthy() if err == nil { io.WriteString(w, "OK") } else { http.Error(w, err.Error(), 500) } }), ) } // Ready is used to report if the Pushgateway is ready to process requests. It // currently only uses the Ready method of the MetricScore to detect ready // state. // // The returned handler is already instrumented for Prometheus. func Ready(ms storage.MetricStore) http.Handler { return promhttp.InstrumentHandlerCounter( httpCnt.MustCurryWith(prometheus.Labels{"handler": "ready"}), http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { err := ms.Ready() if err == nil { io.WriteString(w, "OK") } else { http.Error(w, err.Error(), 500) } }), ) } // Static serves the static files from the provided http.FileSystem. // // The returned handler is already instrumented for Prometheus. func Static(root http.FileSystem, prefix string) http.Handler { if prefix == "/" { prefix = "" } handler := server.StaticFileServer(root) return promhttp.InstrumentHandlerCounter( httpCnt.MustCurryWith(prometheus.Labels{"handler": "static"}), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { r.URL.Path = r.URL.Path[len(prefix):] handler.ServeHTTP(w, r) }), ) } prometheus-pushgateway-1.0.0+ds/handler/misc_test.go000066400000000000000000000035411356076130400226120ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package handler import ( "fmt" "log" "net/http" "net/http/httptest" "os" "testing" ) type fakeFileSystem struct { files map[string]struct{} } // Open implements the http.FileSystem interface // // If a file is present, no error will be returned. // This implementation always returns a nil File. 
func (f *fakeFileSystem) Open(name string) (http.File, error) { log.Println("requesting" + name) if _, ok := f.files[name]; !ok { return nil, os.ErrNotExist } return os.Open("misc_test.go") // return nil, nil } func TestRoutePrefixForStatic(t *testing.T) { fs := &fakeFileSystem{map[string]struct{}{ "/index.js": struct{}{}, }} for _, test := range []struct { prefix string path string code int }{ {"/", "/index.js", 200}, {"/", "/missing.js", 404}, {"/route-prefix", "/index.js", 200}, {"/route-prefix", "/missing.js", 404}, } { test := test t.Run(fmt.Sprintf("%v", test), func(t *testing.T) { t.Parallel() req, err := http.NewRequest( http.MethodGet, "http://example.com"+test.prefix+test.path, nil, ) if err != nil { t.Fatal(err) } w := httptest.NewRecorder() static := Static(fs, test.prefix) static.ServeHTTP(w, req) if test.code != w.Code { t.Errorf("Wanted %d, got %d.", test.code, w.Code) } }) } } prometheus-pushgateway-1.0.0+ds/handler/push.go000066400000000000000000000143261356076130400216020ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package handler import ( "encoding/base64" "fmt" "io" "mime" "net/http" "strings" "sync" "time" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/julienschmidt/httprouter" "github.com/matttproud/golang_protobuf_extensions/pbutil" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" "github.com/prometheus/pushgateway/storage" ) const ( // Base64Suffix is appended to a label name in the request URL path to // mark the following label value as base64 encoded. Base64Suffix = "@base64" ) // Push returns an http.Handler which accepts samples over HTTP and stores them // in the MetricStore. If replace is true, all metrics for the job and instance // given by the request are deleted before new ones are stored. // // The returned handler is already instrumented for Prometheus. func Push( ms storage.MetricStore, replace bool, jobBase64Encoded bool, logger log.Logger, ) func(http.ResponseWriter, *http.Request, httprouter.Params) { var ps httprouter.Params var mtx sync.Mutex // Protects ps. 
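// handler parses the grouping labels from the stored httprouter params, decodes the pushed metrics (delimited protobuf or text format), and submits them to the MetricStore as a single WriteRequest; it is wrapped with request-size, duration, and counter instrumentation further below.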
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { job := ps.ByName("job") if jobBase64Encoded { var err error if job, err = decodeBase64(job); err != nil { http.Error(w, fmt.Sprintf("invalid base64 encoding in job name %q: %v", ps.ByName("job"), err), http.StatusBadRequest) level.Debug(logger).Log("msg", "invalid base64 encoding in job name", "job", ps.ByName("job"), "err", err.Error()) return } } labelsString := ps.ByName("labels") mtx.Unlock() labels, err := splitLabels(labelsString) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) level.Debug(logger).Log("msg", "failed to parse URL", "url", labelsString, "err", err.Error()) return } if job == "" { http.Error(w, "job name is required", http.StatusBadRequest) level.Debug(logger).Log("msg", "job name is required") return } labels["job"] = job var metricFamilies map[string]*dto.MetricFamily ctMediatype, ctParams, ctErr := mime.ParseMediaType(r.Header.Get("Content-Type")) if ctErr == nil && ctMediatype == "application/vnd.google.protobuf" && ctParams["encoding"] == "delimited" && ctParams["proto"] == "io.prometheus.client.MetricFamily" { metricFamilies = map[string]*dto.MetricFamily{} for { mf := &dto.MetricFamily{} if _, err = pbutil.ReadDelimited(r.Body, mf); err != nil { if err == io.EOF { err = nil } break } metricFamilies[mf.GetName()] = mf } } else { // We could do further content-type checks here, but the // fallback for now will anyway be the text format // version 0.0.4, so just go for it and see if it works. var parser expfmt.TextParser metricFamilies, err = parser.TextToMetricFamilies(r.Body) } if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) level.Debug(logger).Log("msg", "failed to parse text", "err", err.Error()) return } now := time.Now() errCh := make(chan error, 1) errReceived := false ms.SubmitWriteRequest(storage.WriteRequest{ Labels: labels, Timestamp: now, MetricFamilies: metricFamilies, Replace: replace, Done: errCh, }) for err := range errCh { // Send only first error via HTTP, but log all of them. // TODO(beorn): Consider sending all errors once we // have a use case. (Currently, at most one error is // produced.) if !errReceived { http.Error( w, fmt.Sprintf("pushed metrics are invalid or inconsistent with existing metrics: %v", err), http.StatusBadRequest, ) } level.Error(logger).Log( "msg", "pushed metrics are invalid or inconsistent with existing metrics", "method", r.Method, "source", r.RemoteAddr, "err", err.Error(), ) errReceived = true } }) instrumentedHandler := promhttp.InstrumentHandlerRequestSize( httpPushSize, promhttp.InstrumentHandlerDuration( httpPushDuration, promhttp.InstrumentHandlerCounter( httpCnt.MustCurryWith(prometheus.Labels{"handler": "push"}), handler, ))) return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) { mtx.Lock() ps = params instrumentedHandler.ServeHTTP(w, r) } } // decodeBase64 decodes the provided string using the “Base 64 Encoding with URL // and Filename Safe Alphabet” (RFC 4648). Padding characters (i.e. trailing // '=') are ignored. func decodeBase64(s string) (string, error) { b, err := base64.RawURLEncoding.DecodeString(strings.TrimRight(s, "=")) return string(b), err } // splitLabels splits a labels string into a label map mapping names to values. 
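// Example (illustrative values, not taken from the original sources): splitLabels("/instance/host1/zone@base64/ZXUtd2VzdA") yields map[string]string{"instance": "host1", "zone": "eu-west"}; a label name carrying the Base64Suffix marks its value as base64-encoded, and the value is decoded via decodeBase64.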
func splitLabels(labels string) (map[string]string, error) { result := map[string]string{} if len(labels) <= 1 { return result, nil } components := strings.Split(labels[1:], "/") if len(components)%2 != 0 { return nil, fmt.Errorf("odd number of components in label string %q", labels) } for i := 0; i < len(components)-1; i += 2 { name, value := components[i], components[i+1] trimmedName := strings.TrimSuffix(name, Base64Suffix) if !model.LabelNameRE.MatchString(trimmedName) || strings.HasPrefix(trimmedName, model.ReservedLabelPrefix) { return nil, fmt.Errorf("improper label name %q", trimmedName) } if name == trimmedName { result[name] = value continue } decodedValue, err := decodeBase64(value) if err != nil { return nil, fmt.Errorf("invalid base64 encoding for label %s=%q: %v", trimmedName, value, err) } result[trimmedName] = decodedValue } return result, nil } prometheus-pushgateway-1.0.0+ds/handler/status.go000066400000000000000000000067631356076130400221540ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package handler import ( "encoding/base64" "fmt" "html" "html/template" "io/ioutil" "net/http" "strconv" "time" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/version" "github.com/prometheus/pushgateway/storage" ) type data struct { MetricGroups storage.GroupingKeyToMetricGroup Flags map[string]string BuildInfo map[string]string Birth time.Time PathPrefix string counter int } func (d *data) Count() int { d.counter++ return d.counter } func (data) FormatTimestamp(ts int64) string { return time.Unix(ts/1000, ts%1000*1000000).String() } // Status serves the status page. // // The returned handler is already instrumented for Prometheus. 
func Status( ms storage.MetricStore, root http.FileSystem, flags map[string]string, pathPrefix string, logger log.Logger, ) http.Handler { birth := time.Now() return promhttp.InstrumentHandlerCounter( httpCnt.MustCurryWith(prometheus.Labels{"handler": "status"}), http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { t := template.New("status") t.Funcs(template.FuncMap{ "value": func(f float64) string { return strconv.FormatFloat(f, 'f', -1, 64) }, "timeFormat": func(t time.Time) string { return t.Format(time.RFC3339) }, "base64": func(s string) string { return base64.RawURLEncoding.EncodeToString([]byte(s)) }, }) f, err := root.Open("template.html") if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) level.Error(logger).Log("msg", "error loading template.html", "err", err.Error()) return } defer f.Close() tpl, err := ioutil.ReadAll(f) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) level.Error(logger).Log("msg", "error reading template.html", "err", err.Error()) return } _, err = t.Parse(string(tpl)) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) level.Error(logger).Log("msg", "error parsing template", "err", err.Error()) return } buildInfo := map[string]string{ "version": version.Version, "revision": version.Revision, "branch": version.Branch, "buildUser": version.BuildUser, "buildDate": version.BuildDate, "goVersion": version.GoVersion, } d := &data{ MetricGroups: ms.GetMetricFamiliesMap(), BuildInfo: buildInfo, Birth: birth, PathPrefix: pathPrefix, Flags: flags, } err = t.Execute(w, d) if err != nil { // Hack to get a visible error message right at the top. fmt.Fprintf(w, `
Error executing template: %s
`, html.EscapeString(err.Error())) fmt.Fprintln(w, ``) } }), ) } prometheus-pushgateway-1.0.0+ds/handler/status_test.go000066400000000000000000000030471356076130400232030ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package handler import ( "io/ioutil" "net/http" "net/http/httptest" "strings" "testing" "time" "github.com/prometheus/pushgateway/asset" "github.com/prometheus/pushgateway/storage" ) func TestPathPrefixPresenceInPage(t *testing.T) { flags := map[string]string{ "web.listen-address": ":9091", "web.telemetry-path": "/metrics", "web.external-url": "http://web-external-url.com", } pathPrefix := "/foobar" ms := storage.NewDiskMetricStore("", time.Minute, nil, logger) status := Status(ms, asset.Assets, flags, pathPrefix, logger) defer ms.Shutdown() w := httptest.NewRecorder() status.ServeHTTP(w, &http.Request{}) if http.StatusOK != w.Code { t.Fatalf("Wanted status %d, got %d", http.StatusOK, w.Code) } rawBody, err := ioutil.ReadAll(w.Result().Body) if err != nil { t.Fatal(err) } body := string(rawBody) if !strings.Contains(body, pathPrefix+"/static") { t.Errorf("Body does not contain %q.", pathPrefix+"/static") t.Log(body) } } prometheus-pushgateway-1.0.0+ds/handler/wipe.go000066400000000000000000000031171356076130400215630ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package handler import ( "net/http" "time" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/pushgateway/storage" ) // WipeMetricStore deletes all the metrics in MetricStore. // // The returned handler is already instrumented for Prometheus. func WipeMetricStore( ms storage.MetricStore, logger log.Logger) http.Handler { return promhttp.InstrumentHandlerCounter( httpCnt.MustCurryWith(prometheus.Labels{"handler": "wipe"}), http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusAccepted) level.Debug(logger).Log("msg", "start wiping metric store") // Delete all metric groups by sending write requests with MetricFamilies equal to nil. 
for _, group := range ms.GetMetricFamiliesMap() { ms.SubmitWriteRequest(storage.WriteRequest{ Labels: group.Labels, Timestamp: time.Now(), }) } })) } prometheus-pushgateway-1.0.0+ds/main.go000066400000000000000000000215431356076130400201310ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "fmt" "net" "net/http" "net/http/pprof" "net/url" "os" "os/signal" "path" "path/filepath" "strings" "syscall" "time" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/julienschmidt/httprouter" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/promlog" "github.com/prometheus/common/version" "gopkg.in/alecthomas/kingpin.v2" dto "github.com/prometheus/client_model/go" promlogflag "github.com/prometheus/common/promlog/flag" "github.com/prometheus/pushgateway/asset" "github.com/prometheus/pushgateway/handler" "github.com/prometheus/pushgateway/storage" ) func init() { prometheus.MustRegister(version.NewCollector("pushgateway")) } // logFunc in an adaptor to plug gokit logging into promhttp.HandlerOpts. type logFunc func(...interface{}) error func (lf logFunc) Println(v ...interface{}) { lf("msg", fmt.Sprintln(v...)) } func main() { var ( app = kingpin.New(filepath.Base(os.Args[0]), "The Pushgateway") listenAddress = app.Flag("web.listen-address", "Address to listen on for the web interface, API, and telemetry.").Default(":9091").String() metricsPath = app.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").String() externalURL = app.Flag("web.external-url", "The URL under which the Pushgateway is externally reachable.").Default("").URL() routePrefix = app.Flag("web.route-prefix", "Prefix for the internal routes of web endpoints. Defaults to the path of --web.external-url.").Default("").String() enableLifeCycle = app.Flag("web.enable-lifecycle", "Enable shutdown via HTTP request.").Default("false").Bool() enableAdminAPI = app.Flag("web.enable-admin-api", "Enable API endpoints for admin control actions.").Default("false").Bool() persistenceFile = app.Flag("persistence.file", "File to persist metrics. 
If empty, metrics are only kept in memory.").Default("").String() persistenceInterval = app.Flag("persistence.interval", "The minimum interval at which to write out the persistence file.").Default("5m").Duration() promlogConfig = promlog.Config{} ) promlogflag.AddFlags(app, &promlogConfig) app.Version(version.Print("pushgateway")) app.HelpFlag.Short('h') kingpin.MustParse(app.Parse(os.Args[1:])) logger := promlog.New(&promlogConfig) *routePrefix = computeRoutePrefix(*routePrefix, *externalURL) externalPathPrefix := computeRoutePrefix("", *externalURL) level.Info(logger).Log("msg", "starting pushgateway", "version", version.Info()) level.Info(logger).Log("build_context", version.BuildContext()) level.Debug(logger).Log("msg", "external URL", "url", *externalURL) level.Debug(logger).Log("msg", "path prefix used externally", "path", externalPathPrefix) level.Debug(logger).Log("msg", "path prefix for internal routing", "path", *routePrefix) // flags is used to show command line flags on the status page. // Kingpin default flags are excluded as they would be confusing. flags := map[string]string{} boilerplateFlags := kingpin.New("", "").Version("") for _, f := range app.Model().Flags { if boilerplateFlags.GetFlag(f.Name) == nil { flags[f.Name] = f.Value.String() } } ms := storage.NewDiskMetricStore(*persistenceFile, *persistenceInterval, prometheus.DefaultGatherer, logger) // Create a Gatherer combining the DefaultGatherer and the metrics from the metric store. g := prometheus.Gatherers{ prometheus.DefaultGatherer, prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { return ms.GetMetricFamilies(), nil }), } r := httprouter.New() r.Handler("GET", *routePrefix+"/-/healthy", handler.Healthy(ms)) r.Handler("GET", *routePrefix+"/-/ready", handler.Ready(ms)) r.Handler( "GET", path.Join(*routePrefix, *metricsPath), promhttp.HandlerFor(g, promhttp.HandlerOpts{ ErrorLog: logFunc(level.Error(logger).Log), }), ) if *enableAdminAPI { // To be consistent with the Prometheus codebase and provide endpoint versioning, we use the same path // as Prometheus for its admin endpoints, even if this may feel excessive for just one simple endpoint; // this will likely change over time. r.Handler("PUT", *routePrefix+"/api/v1/admin/wipe", handler.WipeMetricStore(ms, logger)) } // Handlers for pushing and deleting metrics. pushAPIPath := *routePrefix + "/metrics" for _, suffix := range []string{"", handler.Base64Suffix} { jobBase64Encoded := suffix == handler.Base64Suffix r.PUT(pushAPIPath+"/job"+suffix+"/:job/*labels", handler.Push(ms, true, jobBase64Encoded, logger)) r.POST(pushAPIPath+"/job"+suffix+"/:job/*labels", handler.Push(ms, false, jobBase64Encoded, logger)) r.DELETE(pushAPIPath+"/job"+suffix+"/:job/*labels", handler.Delete(ms, jobBase64Encoded, logger)) r.PUT(pushAPIPath+"/job"+suffix+"/:job", handler.Push(ms, true, jobBase64Encoded, logger)) r.POST(pushAPIPath+"/job"+suffix+"/:job", handler.Push(ms, false, jobBase64Encoded, logger)) r.DELETE(pushAPIPath+"/job"+suffix+"/:job", handler.Delete(ms, jobBase64Encoded, logger)) } r.Handler("GET", *routePrefix+"/static/*filepath", handler.Static(asset.Assets, *routePrefix)) statusHandler := handler.Status(ms, asset.Assets, flags, externalPathPrefix, logger) r.Handler("GET", *routePrefix+"/status", statusHandler) r.Handler("GET", *routePrefix+"/", statusHandler) // Re-enable pprof. 
r.GET(*routePrefix+"/debug/pprof/*pprof", handlePprof) level.Info(logger).Log("listen_address", *listenAddress) l, err := net.Listen("tcp", *listenAddress) if err != nil { level.Error(logger).Log("err", err) os.Exit(1) } quitCh := make(chan struct{}) quitHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "Requesting termination... Goodbye!") close(quitCh) }) forbiddenAPINotEnabled := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusForbidden) w.Write([]byte("Lifecycle API is not enabled.")) }) if *enableLifeCycle { r.Handler("PUT", *routePrefix+"/-/quit", quitHandler) r.Handler("POST", *routePrefix+"/-/quit", quitHandler) } else { r.Handler("PUT", *routePrefix+"/-/quit", forbiddenAPINotEnabled) r.Handler("POST", *routePrefix+"/-/quit", forbiddenAPINotEnabled) } r.Handler("GET", "/-/quit", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusMethodNotAllowed) w.Write([]byte("Only POST or PUT requests allowed.")) })) go closeListenerOnQuit(l, quitCh, logger) err = (&http.Server{Addr: *listenAddress, Handler: r}).Serve(l) level.Error(logger).Log("msg", "HTTP server stopped", "err", err) // To give running connections a chance to submit their payload, we wait // for 1sec, but we don't want to wait long (e.g. until all connections // are done) to not delay the shutdown. time.Sleep(time.Second) if err := ms.Shutdown(); err != nil { level.Error(logger).Log("msg", "problem shutting down metric storage", "err", err) } } func handlePprof(w http.ResponseWriter, r *http.Request, p httprouter.Params) { switch p.ByName("pprof") { case "/cmdline": pprof.Cmdline(w, r) case "/profile": pprof.Profile(w, r) case "/symbol": pprof.Symbol(w, r) default: pprof.Index(w, r) } } // computeRoutePrefix returns the effective route prefix based on the // provided flag values for --web.route-prefix and // --web.external-url. With prefix empty, the path of externalURL is // used instead. A prefix "/" results in an empty returned prefix. Any // non-empty prefix is normalized to start, but not to end, with "/". func computeRoutePrefix(prefix string, externalURL *url.URL) string { if prefix == "" { prefix = externalURL.Path } if prefix == "/" { prefix = "" } if prefix != "" { prefix = "/" + strings.Trim(prefix, "/") } return prefix } // closeListenerOnQuite closes the provided listener upon closing the provided // quitCh or upon receiving a SIGINT or SIGTERM. 
func closeListenerOnQuit(l net.Listener, quitCh <-chan struct{}, logger log.Logger) { notifier := make(chan os.Signal, 1) signal.Notify(notifier, os.Interrupt, syscall.SIGTERM) select { case <-notifier: level.Info(logger).Log("msg", "received SIGINT/SIGTERM; exiting gracefully...") break case <-quitCh: level.Warn(logger).Log("msg", "received termination request via web service, exiting gracefully...") break } l.Close() } prometheus-pushgateway-1.0.0+ds/resources/000077500000000000000000000000001356076130400206635ustar00rootroot00000000000000prometheus-pushgateway-1.0.0+ds/resources/static/000077500000000000000000000000001356076130400221525ustar00rootroot00000000000000prometheus-pushgateway-1.0.0+ds/resources/static/bootstrap4-glyphicons/000077500000000000000000000000001356076130400264305ustar00rootroot00000000000000prometheus-pushgateway-1.0.0+ds/resources/static/bootstrap4-glyphicons/css/000077500000000000000000000000001356076130400272205ustar00rootroot00000000000000prometheus-pushgateway-1.0.0+ds/resources/static/bootstrap4-glyphicons/css/bootstrap-glyphicons.css000066400000000000000000000342731356076130400341350ustar00rootroot00000000000000/*! * Bootstrap v3.3.7 (http://getbootstrap.com) * Copyright 2011-2018 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) */ @font-face { font-family: 'Glyphicons Halflings'; src: url('../fonts/glyphicons/glyphicons-halflings-regular.eot'); src: url('../fonts/glyphicons/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons/glyphicons-halflings-regular.woff2') format('woff2'), url('../fonts/glyphicons/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg'); } .glyphicon { position: relative; top: 1px; display: inline-block; font-family: 'Glyphicons Halflings'; font-style: normal; font-weight: normal; line-height: 1; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; } .glyphicon-asterisk:before { content: "\002a"; } .glyphicon-plus:before { content: "\002b"; } .glyphicon-euro:before, .glyphicon-eur:before { content: "\20ac"; } .glyphicon-minus:before { content: "\2212"; } .glyphicon-cloud:before { content: "\2601"; } .glyphicon-envelope:before { content: "\2709"; } .glyphicon-pencil:before { content: "\270f"; } .glyphicon-glass:before { content: "\e001"; } .glyphicon-music:before { content: "\e002"; } .glyphicon-search:before { content: "\e003"; } .glyphicon-heart:before { content: "\e005"; } .glyphicon-star:before { content: "\e006"; } .glyphicon-star-empty:before { content: "\e007"; } .glyphicon-user:before { content: "\e008"; } .glyphicon-film:before { content: "\e009"; } .glyphicon-th-large:before { content: "\e010"; } .glyphicon-th:before { content: "\e011"; } .glyphicon-th-list:before { content: "\e012"; } .glyphicon-ok:before { content: "\e013"; } .glyphicon-remove:before { content: "\e014"; } .glyphicon-zoom-in:before { content: "\e015"; } .glyphicon-zoom-out:before { content: "\e016"; } .glyphicon-off:before { content: "\e017"; } .glyphicon-signal:before { content: "\e018"; } .glyphicon-cog:before { content: "\e019"; } .glyphicon-trash:before { content: "\e020"; } .glyphicon-home:before { content: "\e021"; } .glyphicon-file:before { content: "\e022"; } .glyphicon-time:before { content: "\e023"; } .glyphicon-road:before { content: "\e024"; } 
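/* Usage sketch (not part of upstream Bootstrap; the markup below is only
   illustrative): the Pushgateway UI applies these classes to inline elements,
   e.g. the collapse toggles handled in functions.js:

     <span class="toggle-icon glyphicon glyphicon-collapse-down"></span>

   The :before content rules in this file map each class to a code point in
   the Glyphicons Halflings font declared above. */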
.glyphicon-download-alt:before { content: "\e025"; } .glyphicon-download:before { content: "\e026"; } .glyphicon-upload:before { content: "\e027"; } .glyphicon-inbox:before { content: "\e028"; } .glyphicon-play-circle:before { content: "\e029"; } .glyphicon-repeat:before { content: "\e030"; } .glyphicon-refresh:before { content: "\e031"; } .glyphicon-list-alt:before { content: "\e032"; } .glyphicon-lock:before { content: "\e033"; } .glyphicon-flag:before { content: "\e034"; } .glyphicon-headphones:before { content: "\e035"; } .glyphicon-volume-off:before { content: "\e036"; } .glyphicon-volume-down:before { content: "\e037"; } .glyphicon-volume-up:before { content: "\e038"; } .glyphicon-qrcode:before { content: "\e039"; } .glyphicon-barcode:before { content: "\e040"; } .glyphicon-tag:before { content: "\e041"; } .glyphicon-tags:before { content: "\e042"; } .glyphicon-book:before { content: "\e043"; } .glyphicon-bookmark:before { content: "\e044"; } .glyphicon-print:before { content: "\e045"; } .glyphicon-camera:before { content: "\e046"; } .glyphicon-font:before { content: "\e047"; } .glyphicon-bold:before { content: "\e048"; } .glyphicon-italic:before { content: "\e049"; } .glyphicon-text-height:before { content: "\e050"; } .glyphicon-text-width:before { content: "\e051"; } .glyphicon-align-left:before { content: "\e052"; } .glyphicon-align-center:before { content: "\e053"; } .glyphicon-align-right:before { content: "\e054"; } .glyphicon-align-justify:before { content: "\e055"; } .glyphicon-list:before { content: "\e056"; } .glyphicon-indent-left:before { content: "\e057"; } .glyphicon-indent-right:before { content: "\e058"; } .glyphicon-facetime-video:before { content: "\e059"; } .glyphicon-picture:before { content: "\e060"; } .glyphicon-map-marker:before { content: "\e062"; } .glyphicon-adjust:before { content: "\e063"; } .glyphicon-tint:before { content: "\e064"; } .glyphicon-edit:before { content: "\e065"; } .glyphicon-share:before { content: "\e066"; } .glyphicon-check:before { content: "\e067"; } .glyphicon-move:before { content: "\e068"; } .glyphicon-step-backward:before { content: "\e069"; } .glyphicon-fast-backward:before { content: "\e070"; } .glyphicon-backward:before { content: "\e071"; } .glyphicon-play:before { content: "\e072"; } .glyphicon-pause:before { content: "\e073"; } .glyphicon-stop:before { content: "\e074"; } .glyphicon-forward:before { content: "\e075"; } .glyphicon-fast-forward:before { content: "\e076"; } .glyphicon-step-forward:before { content: "\e077"; } .glyphicon-eject:before { content: "\e078"; } .glyphicon-chevron-left:before { content: "\e079"; } .glyphicon-chevron-right:before { content: "\e080"; } .glyphicon-plus-sign:before { content: "\e081"; } .glyphicon-minus-sign:before { content: "\e082"; } .glyphicon-remove-sign:before { content: "\e083"; } .glyphicon-ok-sign:before { content: "\e084"; } .glyphicon-question-sign:before { content: "\e085"; } .glyphicon-info-sign:before { content: "\e086"; } .glyphicon-screenshot:before { content: "\e087"; } .glyphicon-remove-circle:before { content: "\e088"; } .glyphicon-ok-circle:before { content: "\e089"; } .glyphicon-ban-circle:before { content: "\e090"; } .glyphicon-arrow-left:before { content: "\e091"; } .glyphicon-arrow-right:before { content: "\e092"; } .glyphicon-arrow-up:before { content: "\e093"; } .glyphicon-arrow-down:before { content: "\e094"; } .glyphicon-share-alt:before { content: "\e095"; } .glyphicon-resize-full:before { content: "\e096"; } .glyphicon-resize-small:before { content: "\e097"; } 
.glyphicon-exclamation-sign:before { content: "\e101"; } .glyphicon-gift:before { content: "\e102"; } .glyphicon-leaf:before { content: "\e103"; } .glyphicon-fire:before { content: "\e104"; } .glyphicon-eye-open:before { content: "\e105"; } .glyphicon-eye-close:before { content: "\e106"; } .glyphicon-warning-sign:before { content: "\e107"; } .glyphicon-plane:before { content: "\e108"; } .glyphicon-calendar:before { content: "\e109"; } .glyphicon-random:before { content: "\e110"; } .glyphicon-comment:before { content: "\e111"; } .glyphicon-magnet:before { content: "\e112"; } .glyphicon-chevron-up:before { content: "\e113"; } .glyphicon-chevron-down:before { content: "\e114"; } .glyphicon-retweet:before { content: "\e115"; } .glyphicon-shopping-cart:before { content: "\e116"; } .glyphicon-folder-close:before { content: "\e117"; } .glyphicon-folder-open:before { content: "\e118"; } .glyphicon-resize-vertical:before { content: "\e119"; } .glyphicon-resize-horizontal:before { content: "\e120"; } .glyphicon-hdd:before { content: "\e121"; } .glyphicon-bullhorn:before { content: "\e122"; } .glyphicon-bell:before { content: "\e123"; } .glyphicon-certificate:before { content: "\e124"; } .glyphicon-thumbs-up:before { content: "\e125"; } .glyphicon-thumbs-down:before { content: "\e126"; } .glyphicon-hand-right:before { content: "\e127"; } .glyphicon-hand-left:before { content: "\e128"; } .glyphicon-hand-up:before { content: "\e129"; } .glyphicon-hand-down:before { content: "\e130"; } .glyphicon-circle-arrow-right:before { content: "\e131"; } .glyphicon-circle-arrow-left:before { content: "\e132"; } .glyphicon-circle-arrow-up:before { content: "\e133"; } .glyphicon-circle-arrow-down:before { content: "\e134"; } .glyphicon-globe:before { content: "\e135"; } .glyphicon-wrench:before { content: "\e136"; } .glyphicon-tasks:before { content: "\e137"; } .glyphicon-filter:before { content: "\e138"; } .glyphicon-briefcase:before { content: "\e139"; } .glyphicon-fullscreen:before { content: "\e140"; } .glyphicon-dashboard:before { content: "\e141"; } .glyphicon-paperclip:before { content: "\e142"; } .glyphicon-heart-empty:before { content: "\e143"; } .glyphicon-link:before { content: "\e144"; } .glyphicon-phone:before { content: "\e145"; } .glyphicon-pushpin:before { content: "\e146"; } .glyphicon-usd:before { content: "\e148"; } .glyphicon-gbp:before { content: "\e149"; } .glyphicon-sort:before { content: "\e150"; } .glyphicon-sort-by-alphabet:before { content: "\e151"; } .glyphicon-sort-by-alphabet-alt:before { content: "\e152"; } .glyphicon-sort-by-order:before { content: "\e153"; } .glyphicon-sort-by-order-alt:before { content: "\e154"; } .glyphicon-sort-by-attributes:before { content: "\e155"; } .glyphicon-sort-by-attributes-alt:before { content: "\e156"; } .glyphicon-unchecked:before { content: "\e157"; } .glyphicon-expand:before { content: "\e158"; } .glyphicon-collapse-down:before { content: "\e159"; } .glyphicon-collapse-up:before { content: "\e160"; } .glyphicon-log-in:before { content: "\e161"; } .glyphicon-flash:before { content: "\e162"; } .glyphicon-log-out:before { content: "\e163"; } .glyphicon-new-window:before { content: "\e164"; } .glyphicon-record:before { content: "\e165"; } .glyphicon-save:before { content: "\e166"; } .glyphicon-open:before { content: "\e167"; } .glyphicon-saved:before { content: "\e168"; } .glyphicon-import:before { content: "\e169"; } .glyphicon-export:before { content: "\e170"; } .glyphicon-send:before { content: "\e171"; } .glyphicon-floppy-disk:before { content: 
"\e172"; } .glyphicon-floppy-saved:before { content: "\e173"; } .glyphicon-floppy-remove:before { content: "\e174"; } .glyphicon-floppy-save:before { content: "\e175"; } .glyphicon-floppy-open:before { content: "\e176"; } .glyphicon-credit-card:before { content: "\e177"; } .glyphicon-transfer:before { content: "\e178"; } .glyphicon-cutlery:before { content: "\e179"; } .glyphicon-header:before { content: "\e180"; } .glyphicon-compressed:before { content: "\e181"; } .glyphicon-earphone:before { content: "\e182"; } .glyphicon-phone-alt:before { content: "\e183"; } .glyphicon-tower:before { content: "\e184"; } .glyphicon-stats:before { content: "\e185"; } .glyphicon-sd-video:before { content: "\e186"; } .glyphicon-hd-video:before { content: "\e187"; } .glyphicon-subtitles:before { content: "\e188"; } .glyphicon-sound-stereo:before { content: "\e189"; } .glyphicon-sound-dolby:before { content: "\e190"; } .glyphicon-sound-5-1:before { content: "\e191"; } .glyphicon-sound-6-1:before { content: "\e192"; } .glyphicon-sound-7-1:before { content: "\e193"; } .glyphicon-copyright-mark:before { content: "\e194"; } .glyphicon-registration-mark:before { content: "\e195"; } .glyphicon-cloud-download:before { content: "\e197"; } .glyphicon-cloud-upload:before { content: "\e198"; } .glyphicon-tree-conifer:before { content: "\e199"; } .glyphicon-tree-deciduous:before { content: "\e200"; } .glyphicon-cd:before { content: "\e201"; } .glyphicon-save-file:before { content: "\e202"; } .glyphicon-open-file:before { content: "\e203"; } .glyphicon-level-up:before { content: "\e204"; } .glyphicon-copy:before { content: "\e205"; } .glyphicon-paste:before { content: "\e206"; } .glyphicon-alert:before { content: "\e209"; } .glyphicon-equalizer:before { content: "\e210"; } .glyphicon-king:before { content: "\e211"; } .glyphicon-queen:before { content: "\e212"; } .glyphicon-pawn:before { content: "\e213"; } .glyphicon-bishop:before { content: "\e214"; } .glyphicon-knight:before { content: "\e215"; } .glyphicon-baby-formula:before { content: "\e216"; } .glyphicon-tent:before { content: "\26fa"; } .glyphicon-blackboard:before { content: "\e218"; } .glyphicon-bed:before { content: "\e219"; } .glyphicon-apple:before { content: "\f8ff"; } .glyphicon-erase:before { content: "\e221"; } .glyphicon-hourglass:before { content: "\231b"; } .glyphicon-lamp:before { content: "\e223"; } .glyphicon-duplicate:before { content: "\e224"; } .glyphicon-piggy-bank:before { content: "\e225"; } .glyphicon-scissors:before { content: "\e226"; } .glyphicon-bitcoin:before { content: "\e227"; } .glyphicon-btc:before { content: "\e227"; } .glyphicon-xbt:before { content: "\e227"; } .glyphicon-yen:before { content: "\00a5"; } .glyphicon-jpy:before { content: "\00a5"; } .glyphicon-ruble:before { content: "\20bd"; } .glyphicon-rub:before { content: "\20bd"; } .glyphicon-scale:before { content: "\e230"; } .glyphicon-ice-lolly:before { content: "\e231"; } .glyphicon-ice-lolly-tasted:before { content: "\e232"; } .glyphicon-education:before { content: "\e233"; } .glyphicon-option-horizontal:before { content: "\e234"; } .glyphicon-option-vertical:before { content: "\e235"; } .glyphicon-menu-hamburger:before { content: "\e236"; } .glyphicon-modal-window:before { content: "\e237"; } .glyphicon-oil:before { content: "\e238"; } .glyphicon-grain:before { content: "\e239"; } .glyphicon-sunglasses:before { content: "\e240"; } .glyphicon-text-size:before { content: "\e241"; } .glyphicon-text-color:before { content: "\e242"; } .glyphicon-text-background:before { 
content: "\e243"; } .glyphicon-object-align-top:before { content: "\e244"; } .glyphicon-object-align-bottom:before { content: "\e245"; } .glyphicon-object-align-horizontal:before { content: "\e246"; } .glyphicon-object-align-left:before { content: "\e247"; } .glyphicon-object-align-vertical:before { content: "\e248"; } .glyphicon-object-align-right:before { content: "\e249"; } .glyphicon-triangle-right:before { content: "\e250"; } .glyphicon-triangle-left:before { content: "\e251"; } .glyphicon-triangle-bottom:before { content: "\e252"; } .glyphicon-triangle-top:before { content: "\e253"; } .glyphicon-console:before { content: "\e254"; } .glyphicon-superscript:before { content: "\e255"; } .glyphicon-subscript:before { content: "\e256"; } .glyphicon-menu-left:before { content: "\e257"; } .glyphicon-menu-right:before { content: "\e258"; } .glyphicon-menu-down:before { content: "\e259"; } .glyphicon-menu-up:before { content: "\e260"; } prometheus-pushgateway-1.0.0+ds/resources/static/favicon.ico000066400000000000000000000353561356076130400243070ustar00rootroot00000000000000 h6  00 %F(  IDlݭݭwݲݭݭݳٙV====VڟJ0ܮݰݰݰݰݮF@҅bffffbԉz҃ڝ֒ݬݶٙڝݫߩީߤܴG޾ݵݯݫ֑>ڝڟߨASݲՏD@( @ *ng$80ؗݸݭݭݺܤ8Af,Dܢ68ԋݳܴܴܴܴܴܴݳܢ+sީޫݫݫݫݫݫݫݫݫݫݫޫݪՏw:Fs*Տ޶ݶݶݶݶݶݶݶݶݶݶݶݶݶܤ+n߿ݰܱܱܱܱܱܱܱܱܱܱܱܱݰ7VSkߤަKݮ޾~޿ި|\޹ݬgݰu۠ݲ`ߨPߧсުjޤݶR(ݱݯަzeݯݲݻݬݭ~5ݼӄbݵ|%ݭw7p޾ޫݯڝݶ1ޯ`k$<ݵߤ/0gݰ%bs0("`Y??(0` 9of1 )th 0$Qܴݭݮݶdo?Pߥިw.cfLvٛ]vݹB\bSݹݺݺݺݺݺݺݺݺݺݺݹuH?)zwxxzzzzzzzzzzzzzzxxwс ڝa1ڝٛt*޿޿޿޿޿޿޿޿޿޿޿޿޿޿޿޿޿޿޿߿0uUެ߭߭߭߭߭߭߭߭߭߭߭߭߭߭߭߭߭߭߭ެ9ܳݹpQMٛޤ~ݰ޶ԉߨݳ7gީ7bܸݬߤުӇdެt޶ٛPٛުٛԉݻ޾ݬܳߦݲߦݷߧzk9ݬI4ؗߤݸ ߨݰߥwoݻ߽ߨިO%ٙ߿`ݬJݪޤݳؗݳݪk*޾ީgߤ޾`vӇݷݭvߦXсߨ,ݳ5ޫݳݮ߭ݮ޾!Uݳݱo9<hݶq҃NgަOSܴٛ>2޲޺#n9\ $sdY)ZR"???prometheus-pushgateway-1.0.0+ds/resources/static/functions.js000066400000000000000000000072041356076130400245230ustar00rootroot00000000000000// Namespace. var pushgateway = {}; pushgateway.labels = {}; pushgateway.panel = null; pushgateway.switchToMetrics = function(){ $('#metrics-div').show(); $('#status-div').hide(); $('#metrics-li').addClass('active'); $('#status-li').removeClass('active'); } pushgateway.switchToStatus = function(){ $('#metrics-div').hide(); $('#status-div').show(); $('#metrics-li').removeClass('active'); $('#status-li').addClass('active'); } pushgateway.showDelModal = function(labels, labelsEncoded, panelID, event){ event.stopPropagation(); // Don't trigger accordion collapse. pushgateway.labels = labelsEncoded; pushgateway.panel = $('#' + panelID).parent(); var components = []; for (var ln in labels) { components.push(ln + '="' + labels[ln] + '"') } $('#del-modal-msg').text( 'Do you really want to delete all metrics of group {' + components.join(', ') + '}?' ); $('#del-modal').modal('show'); } pushgateway.showDelAllModal = function(){ if (!$('button#del-all').hasClass('disabled')) { $('#del-modal-all-msg').text( 'Do you really want to delete all metrics from all metric groups?' 
); $('#del-all-modal').modal('show'); } } pushgateway.deleteGroup = function(){ var pathElements = []; for (var ln in pushgateway.labels) { if (ln != 'job') { pathElements.push(encodeURIComponent(ln+'@base64')); pathElements.push(encodeURIComponent(pushgateway.labels[ln])); } } var groupPath = pathElements.join('/'); if (groupPath.length > 0) { groupPath = '/' + groupPath; } $.ajax({ type: 'DELETE', url: 'metrics/job@base64/' + encodeURIComponent(pushgateway.labels['job']) + groupPath, success: function(data, textStatus, jqXHR) { pushgateway.panel.remove(); pushgateway.decreaseDelAllCounter(); $('#del-modal').modal('hide'); }, error: function(jqXHR, textStatus, error) { alert('Deleting metric group failed: ' + error); } }); } pushgateway.deleteAllGroup = function(){ $.ajax({ type: 'PUT', url: 'api/v1/admin/wipe', success: function(data, textStatus, jqXHR) { $('div').each(function() { id = $(this).attr("id"); if (typeof id != 'undefined' && id.match(/^group-panel-[0-9]{1,}$/)) { $(this).parent().remove(); } }); pushgateway.setDelAllCounter(0); $('#del-all-modal').modal('hide'); }, error: function(jqXHR, textStatus, error) { alert('Deleting all metric groups failed: ' + error); } }); } pushgateway.decreaseDelAllCounter = function(){ var counter = parseInt($('span#del-all-counter').text()); pushgateway.setDelAllCounter(--counter); } pushgateway.setDelAllCounter = function(n){ $('span#del-all-counter').text(n); if (n <= 0) { pushgateway.disableDelAllGroupButton(); return; } pushgateway.enableDelAllGroupButton(); } pushgateway.enableDelAllGroupButton = function(){ $('button#del-all').removeClass('disabled'); } pushgateway.disableDelAllGroupButton = function(){ $('button#del-all').addClass('disabled'); } $(function () { $('div.collapse').on('show.bs.collapse', function (event) { $(this).prev().find('span.toggle-icon') .removeClass('glyphicon-collapse-down') .addClass('glyphicon-collapse-up'); event.stopPropagation(); }) $('div.collapse').on('hide.bs.collapse', function (event) { $(this).prev().find('span.toggle-icon') .removeClass('glyphicon-collapse-up') .addClass('glyphicon-collapse-down'); event.stopPropagation(); }) }) prometheus-pushgateway-1.0.0+ds/resources/static/prometheus.css000066400000000000000000000012421356076130400250560ustar00rootroot00000000000000/* Move down content because we have a fixed navbar that is 50px tall with 20px padding */ body { padding-top: 70px; padding-bottom: 20px; } .state_indicator { padding: 0 4px 0 4px; } .literal_output td { font-family: monospace; } .cursor-pointer { cursor: pointer; } .tooltip-inner { max-width: none; text-align: left; } .label { white-space: normal; } /* The navbar adds horizontal padding already */ .navbar .container-fluid { padding: 0; } /* This class provides style for containers that hold card like (without background) objects within container-fluid and out of accordion */ .blank-card { padding-bottom: 50px; padding-right: 25px; } prometheus-pushgateway-1.0.0+ds/resources/template.html000066400000000000000000000232411356076130400233660ustar00rootroot00000000000000 {{/* Copyright 2014 The Prometheus Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */}} Prometheus Pushgateway
{{$data := .}} {{if eq (index .Flags "web.enable-admin-api") "true"}}
{{end}}
{{range .MetricGroups}} {{$gCount := $data.Count}}

{{if not $metricGroup.LastPushSuccess}}Last push failed!{{end}}

{{range $name, $tmf := .Metrics }} {{$mCount := $data.Count}}

{{range $tmf.GetMetricFamily.Metric}} {{end}}
Labels Value
{{range .Label}} {{.Name}}="{{.GetValue}}" {{end}} {{with .Gauge}} {{value .GetValue}} {{else}} {{with .Counter}} {{value .GetValue}} {{else}} {{with .Untyped}} {{value .GetValue}} {{else}} {{with .Summary}} {{range .Quantile}} {{end}}
Quantile {{.GetQuantile}} {{value .GetValue}}
Sample Count {{.GetSampleCount}}
Sample Sum {{value .GetSampleSum}}
{{else}} {{with .Histogram}} {{range .Bucket}} {{end}}
Sample values ≤ {{value .GetUpperBound}} {{.GetCumulativeCount}}
Total sample Count {{.GetSampleCount}}
Sample Sum {{value .GetSampleSum}}
{{end}} {{end}} {{end}} {{end}} {{end}}
{{end}}
{{end}}
prometheus-pushgateway-1.0.0+ds/storage/000077500000000000000000000000001356076130400203155ustar00rootroot00000000000000prometheus-pushgateway-1.0.0+ds/storage/diskmetricstore.go000066400000000000000000000424301356076130400240620ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "encoding/gob" "errors" "fmt" "io/ioutil" "os" "path" "sort" "strings" "sync" "time" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/golang/protobuf/proto" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) const ( pushMetricName = "push_time_seconds" pushMetricHelp = "Last Unix time when changing this group in the Pushgateway succeeded." pushFailedMetricName = "push_failure_time_seconds" pushFailedMetricHelp = "Last Unix time when changing this group in the Pushgateway failed." writeQueueCapacity = 1000 ) var errTimestamp = errors.New("pushed metrics must not have timestamps") // DiskMetricStore is an implementation of MetricStore that persists metrics to // disk. type DiskMetricStore struct { lock sync.RWMutex // Protects metricFamilies. writeQueue chan WriteRequest drain chan struct{} done chan error metricGroups GroupingKeyToMetricGroup persistenceFile string predefinedHelp map[string]string logger log.Logger } type mfStat struct { pos int // Where in the result slice is the MetricFamily? copied bool // Has the MetricFamily already been copied? } // NewDiskMetricStore returns a DiskMetricStore ready to use. To cleanly shut it // down and free resources, the Shutdown() method has to be called. // // If persistenceFile is the empty string, no persisting to disk will // happen. Otherwise, a file of that name is used for persisting metrics to // disk. If the file already exists, metrics are read from it as part of the // start-up. Persisting is happening upon shutdown and after every write action, // but the latter will only happen persistenceDuration after the previous // persisting. // // If a non-nil Gatherer is provided, the help strings of metrics gathered by it // will be used as standard. Pushed metrics with deviating help strings will be // adjusted to avoid inconsistent expositions. func NewDiskMetricStore( persistenceFile string, persistenceInterval time.Duration, gatherPredefinedHelpFrom prometheus.Gatherer, logger log.Logger, ) *DiskMetricStore { // TODO: Do that outside of the constructor to allow the HTTP server to // serve /-/healthy and /-/ready earlier. 
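// Caller-side sketch (hedged; it mirrors how main.go wires this constructor
// up, with an illustrative persistence file name and the default 5m interval):
//
//   ms := storage.NewDiskMetricStore("/path/to/pushgateway.data", 5*time.Minute, prometheus.DefaultGatherer, logger)
//   ...
//   if err := ms.Shutdown(); err != nil { /* log the error */ }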
dms := &DiskMetricStore{ writeQueue: make(chan WriteRequest, writeQueueCapacity), drain: make(chan struct{}), done: make(chan error), metricGroups: GroupingKeyToMetricGroup{}, persistenceFile: persistenceFile, logger: logger, } if err := dms.restore(); err != nil { level.Error(logger).Log("msg", "could not load persisted metrics", "err", err) } if helpStrings, err := extractPredefinedHelpStrings(gatherPredefinedHelpFrom); err == nil { dms.predefinedHelp = helpStrings } else { level.Error(logger).Log("msg", "could not gather metrics for predefined help strings", "err", err) } go dms.loop(persistenceInterval) return dms } // SubmitWriteRequest implements the MetricStore interface. func (dms *DiskMetricStore) SubmitWriteRequest(req WriteRequest) { dms.writeQueue <- req } // Shutdown implements the MetricStore interface. func (dms *DiskMetricStore) Shutdown() error { close(dms.drain) return <-dms.done } // Healthy implements the MetricStore interface. func (dms *DiskMetricStore) Healthy() error { // By taking the lock we check that there is no deadlock. dms.lock.Lock() defer dms.lock.Unlock() // A pushgateway that cannot be written to should not be // considered as healthy. if len(dms.writeQueue) == cap(dms.writeQueue) { return fmt.Errorf("write queue is full") } return nil } // Ready implements the MetricStore interface. func (dms *DiskMetricStore) Ready() error { return dms.Healthy() } // GetMetricFamilies implements the MetricStore interface. func (dms *DiskMetricStore) GetMetricFamilies() []*dto.MetricFamily { dms.lock.RLock() defer dms.lock.RUnlock() result := []*dto.MetricFamily{} mfStatByName := map[string]mfStat{} for _, group := range dms.metricGroups { for name, tmf := range group.Metrics { mf := tmf.GetMetricFamily() if mf == nil { level.Warn(dms.logger).Log("msg", "storage corruption detected, consider wiping the persistence file") continue } stat, exists := mfStatByName[name] if exists { existingMF := result[stat.pos] if !stat.copied { mfStatByName[name] = mfStat{ pos: stat.pos, copied: true, } existingMF = copyMetricFamily(existingMF) result[stat.pos] = existingMF } if mf.GetHelp() != existingMF.GetHelp() { level.Info(dms.logger).Log("msg", "metric families inconsistent help strings", "err", "Metric families have inconsistent help strings. The latter will have priority. This is bad. Fix your pushed metrics!", "new", mf, "old", existingMF) } // Type inconsistency cannot be fixed here. We will detect it during // gathering anyway, so no reason to log anything here. existingMF.Metric = append(existingMF.Metric, mf.Metric...) } else { copied := false if help, ok := dms.predefinedHelp[name]; ok && mf.GetHelp() != help { level.Info(dms.logger).Log("msg", "metric families overlap", "err", "Metric family has the same name as a metric family used by the Pushgateway itself but it has a different help string. Changing it to the standard help string. This is bad. Fix your pushed metrics!", "metric_family", mf, "standard_help", help) mf = copyMetricFamily(mf) copied = true mf.Help = proto.String(help) } mfStatByName[name] = mfStat{ pos: len(result), copied: copied, } result = append(result, mf) } } } return result } // GetMetricFamiliesMap implements the MetricStore interface. 
func (dms *DiskMetricStore) GetMetricFamiliesMap() GroupingKeyToMetricGroup { dms.lock.RLock() defer dms.lock.RUnlock() groupsCopy := make(GroupingKeyToMetricGroup, len(dms.metricGroups)) for k, g := range dms.metricGroups { metricsCopy := make(NameToTimestampedMetricFamilyMap, len(g.Metrics)) groupsCopy[k] = MetricGroup{Labels: g.Labels, Metrics: metricsCopy} for n, tmf := range g.Metrics { metricsCopy[n] = tmf } } return groupsCopy } func (dms *DiskMetricStore) loop(persistenceInterval time.Duration) { lastPersist := time.Now() persistScheduled := false lastWrite := time.Time{} persistDone := make(chan time.Time) var persistTimer *time.Timer checkPersist := func() { if dms.persistenceFile != "" && !persistScheduled && lastWrite.After(lastPersist) { persistTimer = time.AfterFunc( persistenceInterval-lastWrite.Sub(lastPersist), func() { persistStarted := time.Now() if err := dms.persist(); err != nil { level.Error(dms.logger).Log("msg", "error persisting metrics", "err", err) } else { level.Info(dms.logger).Log("msg", "metrics persisted", "file", dms.persistenceFile) } persistDone <- persistStarted }, ) persistScheduled = true } } for { select { case wr := <-dms.writeQueue: lastWrite = time.Now() if dms.checkWriteRequest(wr) { dms.processWriteRequest(wr) } else { dms.setPushFailedTimestamp(wr) } if wr.Done != nil { close(wr.Done) } checkPersist() case lastPersist = <-persistDone: persistScheduled = false checkPersist() // In case something has been written in the meantime. case <-dms.drain: // Prevent a scheduled persist from firing later. if persistTimer != nil { persistTimer.Stop() } // Now draining... for { select { case wr := <-dms.writeQueue: dms.processWriteRequest(wr) default: dms.done <- dms.persist() return } } } } } func (dms *DiskMetricStore) processWriteRequest(wr WriteRequest) { dms.lock.Lock() defer dms.lock.Unlock() key := groupingKeyFor(wr.Labels) if wr.MetricFamilies == nil { // No MetricFamilies means delete request. Delete the whole // metric group, and we are done here. delete(dms.metricGroups, key) return } // Otherwise, it's an update. group, ok := dms.metricGroups[key] if !ok { group = MetricGroup{ Labels: wr.Labels, Metrics: NameToTimestampedMetricFamilyMap{}, } dms.metricGroups[key] = group } else if wr.Replace { // For replace, we have to delete all metric families in the // group except pre-existing push timestamps. for name := range group.Metrics { if name != pushMetricName && name != pushFailedMetricName { delete(group.Metrics, name) } } } wr.MetricFamilies[pushMetricName] = newPushTimestampGauge(wr.Labels, wr.Timestamp) // Only add a zero push-failed metric if none is there yet, so that a // previously added fail timestamp is retained. 
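// After a successful push, the group therefore exposes both bookkeeping
// gauges, roughly like this (label set and timestamp value are illustrative):
//
//   push_time_seconds{instance="",job="some_job"} 1.5705896e+09
//   push_failure_time_seconds{instance="",job="some_job"} 0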
if _, ok := group.Metrics[pushFailedMetricName]; !ok { wr.MetricFamilies[pushFailedMetricName] = newPushFailedTimestampGauge(wr.Labels, time.Time{}) } for name, mf := range wr.MetricFamilies { group.Metrics[name] = TimestampedMetricFamily{ Timestamp: wr.Timestamp, GobbableMetricFamily: (*GobbableMetricFamily)(mf), } } } func (dms *DiskMetricStore) setPushFailedTimestamp(wr WriteRequest) { dms.lock.Lock() defer dms.lock.Unlock() key := groupingKeyFor(wr.Labels) group, ok := dms.metricGroups[key] if !ok { group = MetricGroup{ Labels: wr.Labels, Metrics: NameToTimestampedMetricFamilyMap{}, } dms.metricGroups[key] = group } group.Metrics[pushFailedMetricName] = TimestampedMetricFamily{ Timestamp: wr.Timestamp, GobbableMetricFamily: (*GobbableMetricFamily)(newPushFailedTimestampGauge(wr.Labels, wr.Timestamp)), } // Only add a zero push metric if none is there yet, so that a // previously added push timestamp is retained. if _, ok := group.Metrics[pushMetricName]; !ok { group.Metrics[pushMetricName] = TimestampedMetricFamily{ Timestamp: wr.Timestamp, GobbableMetricFamily: (*GobbableMetricFamily)(newPushTimestampGauge(wr.Labels, time.Time{})), } } } // checkWriteRequest returns whether applying the provided WriteRequest will result in // a consistent state of metrics. The dms is not modified by the check. However, // the WriteRequest _will_ be sanitized: the MetricFamilies are ensured to // contain the grouping Labels after the check. If false is returned, the // causing error is written to the Done channel of the WriteRequest. func (dms *DiskMetricStore) checkWriteRequest(wr WriteRequest) bool { if wr.MetricFamilies == nil { // A delete request cannot create inconsistencies, and nothing has // to be sanitized. return true } var err error defer func() { if err != nil && wr.Done != nil { wr.Done <- err } }() if timestampsPresent(wr.MetricFamilies) { err = errTimestamp return false } for _, mf := range wr.MetricFamilies { sanitizeLabels(mf, wr.Labels) } // Construct a test dms, acting on a copy of the metrics, to test the // WriteRequest with. tdms := &DiskMetricStore{ metricGroups: dms.GetMetricFamiliesMap(), predefinedHelp: dms.predefinedHelp, logger: log.NewNopLogger(), } tdms.processWriteRequest(wr) // Construct a test Gatherer to check if consistent gathering is possible. tg := prometheus.Gatherers{ prometheus.DefaultGatherer, prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { return tdms.GetMetricFamilies(), nil }), } if _, err = tg.Gather(); err != nil { return false } return true } func (dms *DiskMetricStore) persist() error { // Check (again) if persistence is configured because some code paths // will call this method even if it is not.
if dms.persistenceFile == "" { return nil } f, err := ioutil.TempFile( path.Dir(dms.persistenceFile), path.Base(dms.persistenceFile)+".in_progress.", ) if err != nil { return err } inProgressFileName := f.Name() e := gob.NewEncoder(f) dms.lock.RLock() err = e.Encode(dms.metricGroups) dms.lock.RUnlock() if err != nil { f.Close() os.Remove(inProgressFileName) return err } if err := f.Close(); err != nil { os.Remove(inProgressFileName) return err } return os.Rename(inProgressFileName, dms.persistenceFile) } func (dms *DiskMetricStore) restore() error { if dms.persistenceFile == "" { return nil } f, err := os.Open(dms.persistenceFile) if os.IsNotExist(err) { return nil } if err != nil { return err } defer f.Close() d := gob.NewDecoder(f) if err := d.Decode(&dms.metricGroups); err != nil { return err } return nil } func copyMetricFamily(mf *dto.MetricFamily) *dto.MetricFamily { return &dto.MetricFamily{ Name: mf.Name, Help: mf.Help, Type: mf.Type, Metric: append([]*dto.Metric{}, mf.Metric...), } } // groupingKeyFor creates a grouping key from the provided map of grouping // labels. The grouping key is created by joining all label names and values // together with model.SeparatorByte as a separator. The label names are sorted // lexicographically before joining. In that way, the grouping key is both // reproducible and unique. func groupingKeyFor(labels map[string]string) string { if len(labels) == 0 { // Super fast path. return "" } labelNames := make([]string, 0, len(labels)) for labelName := range labels { labelNames = append(labelNames, labelName) } sort.Strings(labelNames) sb := strings.Builder{} for i, labelName := range labelNames { sb.WriteString(labelName) sb.WriteByte(model.SeparatorByte) sb.WriteString(labels[labelName]) if i+1 < len(labels) { // No separator at the end. sb.WriteByte(model.SeparatorByte) } } return sb.String() } // extractPredefinedHelpStrings extracts all the HELP strings from the provided // gatherer so that the DiskMetricStore can fix deviations in pushed metrics. func extractPredefinedHelpStrings(g prometheus.Gatherer) (map[string]string, error) { if g == nil { return nil, nil } mfs, err := g.Gather() if err != nil { return nil, err } result := map[string]string{} for _, mf := range mfs { result[mf.GetName()] = mf.GetHelp() } return result, nil } func newPushTimestampGauge(groupingLabels map[string]string, t time.Time) *dto.MetricFamily { return newTimestampGauge(pushMetricName, pushMetricHelp, groupingLabels, t) } func newPushFailedTimestampGauge(groupingLabels map[string]string, t time.Time) *dto.MetricFamily { return newTimestampGauge(pushFailedMetricName, pushFailedMetricHelp, groupingLabels, t) } func newTimestampGauge(name, help string, groupingLabels map[string]string, t time.Time) *dto.MetricFamily { var ts float64 if !t.IsZero() { ts = float64(t.UnixNano()) / 1e9 } mf := &dto.MetricFamily{ Name: proto.String(name), Help: proto.String(help), Type: dto.MetricType_GAUGE.Enum(), Metric: []*dto.Metric{ { Gauge: &dto.Gauge{ Value: proto.Float64(ts), }, }, }, } sanitizeLabels(mf, groupingLabels) return mf } // sanitizeLabels ensures that all the labels in groupingLabels and the // `instance` label are present in the MetricFamily. The label values from // groupingLabels are set in each Metric, no matter what. After that, if the // 'instance' label is not present at all in a Metric, it will be created (with // an empty string as value). // // Finally, sanitizeLabels sorts the label pairs of all metrics. 
func sanitizeLabels(mf *dto.MetricFamily, groupingLabels map[string]string) { gLabelsNotYetDone := make(map[string]string, len(groupingLabels)) metric: for _, m := range mf.GetMetric() { for ln, lv := range groupingLabels { gLabelsNotYetDone[ln] = lv } hasInstanceLabel := false for _, lp := range m.GetLabel() { ln := lp.GetName() if lv, ok := gLabelsNotYetDone[ln]; ok { lp.Value = proto.String(lv) delete(gLabelsNotYetDone, ln) } if ln == string(model.InstanceLabel) { hasInstanceLabel = true } if len(gLabelsNotYetDone) == 0 && hasInstanceLabel { sort.Sort(labelPairs(m.Label)) continue metric } } for ln, lv := range gLabelsNotYetDone { m.Label = append(m.Label, &dto.LabelPair{ Name: proto.String(ln), Value: proto.String(lv), }) if ln == string(model.InstanceLabel) { hasInstanceLabel = true } delete(gLabelsNotYetDone, ln) // To prepare map for next metric. } if !hasInstanceLabel { m.Label = append(m.Label, &dto.LabelPair{ Name: proto.String(string(model.InstanceLabel)), Value: proto.String(""), }) } sort.Sort(labelPairs(m.Label)) } } // Checks if any timestamps have been specified. func timestampsPresent(metricFamilies map[string]*dto.MetricFamily) bool { for _, mf := range metricFamilies { for _, m := range mf.GetMetric() { if m.TimestampMs != nil { return true } } } return false } // labelPairs implements sort.Interface. It provides a sortable version of a // slice of dto.LabelPair pointers. type labelPairs []*dto.LabelPair func (s labelPairs) Len() int { return len(s) } func (s labelPairs) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s labelPairs) Less(i, j int) bool { return s[i].GetName() < s[j].GetName() } prometheus-pushgateway-1.0.0+ds/storage/diskmetricstore_test.go000066400000000000000000001114241356076130400251210ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "fmt" "io/ioutil" "math" "os" "path" "sort" "testing" "time" "github.com/go-kit/kit/log" "github.com/golang/protobuf/proto" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) var ( logger = log.NewNopLogger() // Example metric families. Keep labels sorted lexicographically! 
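// As orientation, mf1a below corresponds roughly to this text exposition
// (a sketch for readability, not generated output):
//
//   # TYPE mf1 untyped
//   mf1{instance="instance2",job="job1"} -3000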
mf1a = &dto.MetricFamily{ Name: proto.String("mf1"), Type: dto.MetricType_UNTYPED.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String("instance2"), }, { Name: proto.String("job"), Value: proto.String("job1"), }, }, Untyped: &dto.Untyped{ Value: proto.Float64(-3e3), }, }, }, } mf1b = &dto.MetricFamily{ Name: proto.String("mf1"), Type: dto.MetricType_UNTYPED.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String("instance2"), }, { Name: proto.String("job"), Value: proto.String("job1"), }, }, Untyped: &dto.Untyped{ Value: proto.Float64(42), }, }, }, } mf1c = &dto.MetricFamily{ Name: proto.String("mf1"), Type: dto.MetricType_UNTYPED.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String("instance1"), }, { Name: proto.String("job"), Value: proto.String("job2"), }, }, Untyped: &dto.Untyped{ Value: proto.Float64(42), }, }, }, } mf1d = &dto.MetricFamily{ Name: proto.String("mf1"), Type: dto.MetricType_UNTYPED.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String("instance2"), }, { Name: proto.String("job"), Value: proto.String("job3"), }, }, Untyped: &dto.Untyped{ Value: proto.Float64(42), }, }, }, } mf1e = &dto.MetricFamily{ Name: proto.String("mf1"), Type: dto.MetricType_UNTYPED.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("job"), Value: proto.String("job1"), }, }, Untyped: &dto.Untyped{ Value: proto.Float64(42), }, }, }, } // mf1acd is merged from mf1a, mf1c, mf1d. mf1acd = &dto.MetricFamily{ Name: proto.String("mf1"), Type: dto.MetricType_UNTYPED.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String("instance2"), }, { Name: proto.String("job"), Value: proto.String("job1"), }, }, Untyped: &dto.Untyped{ Value: proto.Float64(-3e3), }, }, { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String("instance1"), }, { Name: proto.String("job"), Value: proto.String("job2"), }, }, Untyped: &dto.Untyped{ Value: proto.Float64(42), }, }, { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String("instance2"), }, { Name: proto.String("job"), Value: proto.String("job3"), }, }, Untyped: &dto.Untyped{ Value: proto.Float64(42), }, }, }, } // mf1be is merged from mf1b and mf1e, with added empty instance label for mf1e. mf1be = &dto.MetricFamily{ Name: proto.String("mf1"), Type: dto.MetricType_UNTYPED.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String("instance2"), }, { Name: proto.String("job"), Value: proto.String("job1"), }, }, Untyped: &dto.Untyped{ Value: proto.Float64(42), }, }, { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String(""), }, { Name: proto.String("job"), Value: proto.String("job1"), }, }, Untyped: &dto.Untyped{ Value: proto.Float64(42), }, }, }, } // mf1ts is mf1a with a timestamp set. 
mf1ts = &dto.MetricFamily{ Name: proto.String("mf1"), Type: dto.MetricType_UNTYPED.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String("instance2"), }, { Name: proto.String("job"), Value: proto.String("job1"), }, }, Untyped: &dto.Untyped{ Value: proto.Float64(-3e3), }, TimestampMs: proto.Int64(103948), }, }, } mf2 = &dto.MetricFamily{ Name: proto.String("mf2"), Help: proto.String("doc string 2"), Type: dto.MetricType_GAUGE.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("basename"), Value: proto.String("basevalue2"), }, { Name: proto.String("instance"), Value: proto.String("instance2"), }, { Name: proto.String("job"), Value: proto.String("job1"), }, { Name: proto.String("labelname"), Value: proto.String("val2"), }, }, Gauge: &dto.Gauge{ Value: proto.Float64(math.Inf(+1)), }, }, { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String("instance2"), }, { Name: proto.String("job"), Value: proto.String("job1"), }, { Name: proto.String("labelname"), Value: proto.String("val1"), }, }, Gauge: &dto.Gauge{ Value: proto.Float64(math.Inf(-1)), }, }, }, } mf3 = &dto.MetricFamily{ Name: proto.String("mf3"), Type: dto.MetricType_UNTYPED.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String("instance1"), }, { Name: proto.String("job"), Value: proto.String("job1"), }, }, Untyped: &dto.Untyped{ Value: proto.Float64(42), }, }, }, } mf4 = &dto.MetricFamily{ Name: proto.String("mf4"), Type: dto.MetricType_UNTYPED.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String("instance2"), }, { Name: proto.String("job"), Value: proto.String("job3"), }, }, Untyped: &dto.Untyped{ Value: proto.Float64(3.4345), }, }, }, } mf5 = &dto.MetricFamily{ Name: proto.String("mf5"), Type: dto.MetricType_SUMMARY.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String("instance5"), }, { Name: proto.String("job"), Value: proto.String("job5"), }, }, Summary: &dto.Summary{ SampleCount: proto.Uint64(0), SampleSum: proto.Float64(0), }, }, }, } mfh1 = &dto.MetricFamily{ Name: proto.String("mf_help"), Help: proto.String("Help string for mfh1."), Type: dto.MetricType_GAUGE.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String(""), }, { Name: proto.String("job"), Value: proto.String("job1"), }, }, Gauge: &dto.Gauge{ Value: proto.Float64(3948.838), }, }, }, } mfh2 = &dto.MetricFamily{ Name: proto.String("mf_help"), Help: proto.String("Help string for mfh2."), Type: dto.MetricType_GAUGE.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String(""), }, { Name: proto.String("job"), Value: proto.String("job2"), }, }, Gauge: &dto.Gauge{ Value: proto.Float64(83), }, }, }, } // Both mfh metrics with mfh1's help string. 
mfh12 = &dto.MetricFamily{ Name: proto.String("mf_help"), Help: proto.String("Help string for mfh1."), Type: dto.MetricType_GAUGE.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String(""), }, { Name: proto.String("job"), Value: proto.String("job1"), }, }, Gauge: &dto.Gauge{ Value: proto.Float64(3948.838), }, }, { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String(""), }, { Name: proto.String("job"), Value: proto.String("job2"), }, }, Gauge: &dto.Gauge{ Value: proto.Float64(83), }, }, }, } // Both mfh metrics with mfh2's help string. mfh21 = &dto.MetricFamily{ Name: proto.String("mf_help"), Help: proto.String("Help string for mfh2."), Type: dto.MetricType_GAUGE.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String(""), }, { Name: proto.String("job"), Value: proto.String("job1"), }, }, Gauge: &dto.Gauge{ Value: proto.Float64(3948.838), }, }, { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String(""), }, { Name: proto.String("job"), Value: proto.String("job2"), }, }, Gauge: &dto.Gauge{ Value: proto.Float64(83), }, }, }, } // mfgg is the usual go_goroutines gauge but with a different help text. mfgg = &dto.MetricFamily{ Name: proto.String("go_goroutines"), Help: proto.String("Inconsistent doc string, fixed version in mfggFixed."), Type: dto.MetricType_GAUGE.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String(""), }, { Name: proto.String("job"), Value: proto.String("job1"), }, }, Gauge: &dto.Gauge{ Value: proto.Float64(5), }, }, }, } // mfgc is the usual go_goroutines metric but mistyped as counter. mfgc = &dto.MetricFamily{ Name: proto.String("go_goroutines"), Help: proto.String("Number of goroutines that currently exist."), Type: dto.MetricType_COUNTER.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String(""), }, { Name: proto.String("job"), Value: proto.String("job1"), }, }, Counter: &dto.Counter{ Value: proto.Float64(5), }, }, }, } mfggFixed = &dto.MetricFamily{ Name: proto.String("go_goroutines"), Help: proto.String("Number of goroutines that currently exist."), Type: dto.MetricType_GAUGE.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("instance"), Value: proto.String(""), }, { Name: proto.String("job"), Value: proto.String("job1"), }, }, Gauge: &dto.Gauge{ Value: proto.Float64(5), }, }, }, } ) // metricFamiliesMap creates the map needed in the MetricFamilies field of a // WriteRequest from the provided reference metric families. While doing so, it // creates deep copies of the metric families so that modifications that might // happen during processing of the WriteRequest will not affect the reference // metric families. 
func metricFamiliesMap(mfs ...*dto.MetricFamily) map[string]*dto.MetricFamily { m := map[string]*dto.MetricFamily{} for _, mf := range mfs { buf, err := proto.Marshal(mf) if err != nil { panic(err) } mfCopy := &dto.MetricFamily{} if err := proto.Unmarshal(buf, mfCopy); err != nil { panic(err) } m[mf.GetName()] = mfCopy } return m } func addGroup( mg GroupingKeyToMetricGroup, groupingLabels map[string]string, metrics NameToTimestampedMetricFamilyMap, ) { mg[groupingKeyFor(groupingLabels)] = MetricGroup{ Labels: groupingLabels, Metrics: metrics, } } func TestGetMetricFamilies(t *testing.T) { testTime := time.Now() mg := GroupingKeyToMetricGroup{} addGroup( mg, map[string]string{ "job": "job1", "instance": "instance1", }, NameToTimestampedMetricFamilyMap{ "mf2": TimestampedMetricFamily{ Timestamp: testTime, GobbableMetricFamily: (*GobbableMetricFamily)(mf2), }, }, ) addGroup( mg, map[string]string{ "job": "job1", "instance": "instance2", }, NameToTimestampedMetricFamilyMap{ "mf1": TimestampedMetricFamily{ Timestamp: testTime, GobbableMetricFamily: (*GobbableMetricFamily)(mf1a), }, "mf3": TimestampedMetricFamily{ Timestamp: testTime, GobbableMetricFamily: (*GobbableMetricFamily)(mf3), }, }, ) addGroup( mg, map[string]string{ "job": "job2", "instance": "instance1", }, NameToTimestampedMetricFamilyMap{ "mf1": TimestampedMetricFamily{ Timestamp: testTime, GobbableMetricFamily: (*GobbableMetricFamily)(mf1c), }, }, ) addGroup( mg, map[string]string{ "job": "job3", "instance": "instance1", }, NameToTimestampedMetricFamilyMap{}, ) addGroup( mg, map[string]string{ "job": "job3", "instance": "instance2", }, NameToTimestampedMetricFamilyMap{ "mf4": TimestampedMetricFamily{ Timestamp: testTime, GobbableMetricFamily: (*GobbableMetricFamily)(mf4), }, "mf1": TimestampedMetricFamily{ Timestamp: testTime, GobbableMetricFamily: (*GobbableMetricFamily)(mf1d), }, }, ) addGroup( mg, map[string]string{ "job": "job4", }, NameToTimestampedMetricFamilyMap{}, ) dms := &DiskMetricStore{metricGroups: mg} if err := checkMetricFamilies(dms, mf1acd, mf2, mf3, mf4); err != nil { t.Error(err) } } func TestAddDeletePersistRestore(t *testing.T) { tempDir, err := ioutil.TempDir("", "diskmetricstore.TestAddDeletePersistRestore.") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempDir) fileName := path.Join(tempDir, "persistence") dms := NewDiskMetricStore(fileName, 100*time.Millisecond, nil, logger) // Submit a single simple metric family. ts1 := time.Now() grouping1 := map[string]string{ "job": "job1", "instance": "instance1", } errCh := make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping1, Timestamp: ts1, MetricFamilies: metricFamiliesMap(mf3), Done: errCh, }) for err := range errCh { t.Fatal("Unexpected error:", err) } pushTimestamp := newPushTimestampGauge(grouping1, ts1) pushFailedTimestamp := newPushFailedTimestampGauge(grouping1, time.Time{}) if err := checkMetricFamilies( dms, mf3, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } // Submit two metric families for a different instance. 
ts2 := ts1.Add(time.Second) grouping2 := map[string]string{ "job": "job1", "instance": "instance2", } errCh = make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping2, Timestamp: ts2, MetricFamilies: metricFamiliesMap(mf1b, mf2), Done: errCh, }) for err := range errCh { t.Fatal("Unexpected error:", err) } pushTimestamp.Metric = append( pushTimestamp.Metric, newPushTimestampGauge(grouping2, ts2).Metric[0], ) pushFailedTimestamp.Metric = append( pushFailedTimestamp.Metric, newPushFailedTimestampGauge(grouping2, time.Time{}).Metric[0], ) if err := checkMetricFamilies( dms, mf1b, mf2, mf3, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } for err := range errCh { t.Fatal("Unexpected error:", err) } // Submit a metric family with the same name for the same job/instance again. // Should overwrite the previous metric family for the same job/instance ts3 := ts2.Add(time.Second) errCh = make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping2, Timestamp: ts3, MetricFamilies: metricFamiliesMap(mf1a), Done: errCh, }) for err := range errCh { t.Fatal("Unexpected error:", err) } pushTimestamp.Metric[1] = newPushTimestampGauge(grouping2, ts3).Metric[0] if err := checkMetricFamilies( dms, mf1a, mf2, mf3, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } // Add a new group by job, with a summary without any observations yet. ts4 := ts3.Add(time.Second) grouping4 := map[string]string{ "job": "job5", } errCh = make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping4, Timestamp: ts4, MetricFamilies: metricFamiliesMap(mf5), Done: errCh, }) for err := range errCh { t.Fatal("Unexpected error:", err) } pushTimestamp.Metric = append( pushTimestamp.Metric, newPushTimestampGauge(grouping4, ts4).Metric[0], ) pushFailedTimestamp.Metric = append( pushFailedTimestamp.Metric, newPushFailedTimestampGauge(grouping4, time.Time{}).Metric[0], ) if err := checkMetricFamilies( dms, mf1a, mf2, mf3, mf5, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } // Shutdown the dms. if err := dms.Shutdown(); err != nil { t.Fatal(err) } // Load it again. dms = NewDiskMetricStore(fileName, 100*time.Millisecond, nil, logger) if err := checkMetricFamilies( dms, mf1a, mf2, mf3, mf5, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } // Spot-check timestamp. tmf := dms.metricGroups[groupingKeyFor(map[string]string{ "job": "job1", "instance": "instance2", })].Metrics["mf1"] if expected, got := ts3, tmf.Timestamp; !expected.Equal(got) { t.Errorf("Expected timestamp %v, got %v.", expected, got) } // Delete two groups. dms.SubmitWriteRequest(WriteRequest{ Labels: map[string]string{ "job": "job1", "instance": "instance1", }, }) errCh = make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: map[string]string{ "job": "job5", }, Done: errCh, }) for err := range errCh { t.Fatal("Unexpected error:", err) } pushTimestamp = newPushTimestampGauge(grouping2, ts3) pushFailedTimestamp = newPushFailedTimestampGauge(grouping2, time.Time{}) if err := checkMetricFamilies( dms, mf1a, mf2, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } // Submit another one. 
ts5 := ts4.Add(time.Second) grouping5 := map[string]string{ "job": "job3", "instance": "instance2", } errCh = make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping5, Timestamp: ts5, MetricFamilies: metricFamiliesMap(mf4), Done: errCh, }) for err := range errCh { t.Fatal("Unexpected error:", err) } pushTimestamp.Metric = append( pushTimestamp.Metric, newPushTimestampGauge(grouping5, ts5).Metric[0], ) pushFailedTimestamp.Metric = append( pushFailedTimestamp.Metric, newPushFailedTimestampGauge(grouping5, time.Time{}).Metric[0], ) if err := checkMetricFamilies( dms, mf1a, mf2, mf4, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } // Delete a job does not remove anything because there is no suitable // grouping. errCh = make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: map[string]string{ "job": "job1", }, Done: errCh, }) for err := range errCh { t.Fatal("Unexpected error:", err) } if err := checkMetricFamilies( dms, mf1a, mf2, mf4, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } // Delete another group. errCh = make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping5, Done: errCh, }) for err := range errCh { t.Fatal("Unexpected error:", err) } pushTimestamp = newPushTimestampGauge(grouping2, ts3) pushFailedTimestamp = newPushFailedTimestampGauge(grouping2, time.Time{}) if err := checkMetricFamilies( dms, mf1a, mf2, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } // Check that no empty map entry for job3 was left behind. if _, stillExists := dms.metricGroups[groupingKeyFor(grouping5)]; stillExists { t.Error("An instance map for 'job3' still exists.") } // Shutdown the dms again, directly after a number of write request // (to check draining). for i := 0; i < 10; i++ { dms.SubmitWriteRequest(WriteRequest{ Labels: grouping5, Timestamp: ts5, MetricFamilies: metricFamiliesMap(mf4), }) } if err := dms.Shutdown(); err != nil { t.Fatal(err) } pushTimestamp.Metric = append( pushTimestamp.Metric, newPushTimestampGauge(grouping5, ts5).Metric[0], ) pushFailedTimestamp.Metric = append( pushFailedTimestamp.Metric, newPushFailedTimestampGauge(grouping5, time.Time{}).Metric[0], ) if err := checkMetricFamilies( dms, mf1a, mf2, mf4, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } } func TestNoPersistence(t *testing.T) { dms := NewDiskMetricStore("", 100*time.Millisecond, nil, logger) ts1 := time.Now() grouping1 := map[string]string{ "job": "job1", "instance": "instance1", } errCh := make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping1, Timestamp: ts1, MetricFamilies: metricFamiliesMap(mf3), Done: errCh, }) for err := range errCh { t.Fatal("Unexpected error:", err) } pushTimestamp := newPushTimestampGauge(grouping1, ts1) pushFailedTimestamp := newPushFailedTimestampGauge(grouping1, time.Time{}) if err := checkMetricFamilies( dms, mf3, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } if err := dms.Shutdown(); err != nil { t.Fatal(err) } dms = NewDiskMetricStore("", 100*time.Millisecond, nil, logger) if err := checkMetricFamilies(dms); err != nil { t.Error(err) } if err := dms.Ready(); err != nil { t.Error(err) } if err := dms.Healthy(); err != nil { t.Error(err) } } func TestRejectTimestamps(t *testing.T) { dms := NewDiskMetricStore("", 100*time.Millisecond, nil, logger) ts1 := time.Now() grouping1 := map[string]string{ "job": "job1", "instance": "instance1", } errCh := make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping1, 
Timestamp: ts1, MetricFamilies: metricFamiliesMap(mf1ts), Done: errCh, }) var err error for err = range errCh { if err != errTimestamp { t.Errorf("Expected error %q, got %q.", errTimestamp, err) } } if err == nil { t.Error("Expected error on pushing metric with timestamp.") } pushTimestamp := newPushTimestampGauge(grouping1, time.Time{}) pushFailedTimestamp := newPushFailedTimestampGauge(grouping1, ts1) if err := checkMetricFamilies( dms, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } if err := dms.Shutdown(); err != nil { t.Fatal(err) } } func TestRejectInconsistentPush(t *testing.T) { dms := NewDiskMetricStore("", 100*time.Millisecond, nil, logger) ts1 := time.Now() grouping1 := map[string]string{ "job": "job1", } errCh := make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping1, Timestamp: ts1, MetricFamilies: metricFamiliesMap(mfgc), Done: errCh, }) var err error for err = range errCh { } if err == nil { t.Error("Expected error pushing inconsistent go_goroutines metric.") } pushTimestamp := newPushTimestampGauge(grouping1, time.Time{}) pushFailedTimestamp := newPushFailedTimestampGauge(grouping1, ts1) if err := checkMetricFamilies( dms, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } ts2 := ts1.Add(time.Second) errCh = make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping1, Timestamp: ts2, MetricFamilies: metricFamiliesMap(mf1a), Done: errCh, }) for err := range errCh { t.Fatal("Unexpected error:", err) } pushTimestamp = newPushTimestampGauge(grouping1, ts2) if err := checkMetricFamilies( dms, mf1a, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } ts3 := ts2.Add(time.Second) grouping3 := map[string]string{ "job": "job1", "instance": "instance2", } errCh = make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping3, Timestamp: ts3, MetricFamilies: metricFamiliesMap(mf1b), Done: errCh, }) err = nil for err = range errCh { } if err == nil { t.Error("Expected error pushing duplicate mf1 metric.") } pushTimestamp.Metric = append( pushTimestamp.Metric, newPushTimestampGauge(grouping3, time.Time{}).Metric[0], ) pushFailedTimestamp.Metric = append( pushFailedTimestamp.Metric, newPushFailedTimestampGauge(grouping3, ts3).Metric[0], ) if err := checkMetricFamilies( dms, mf1a, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } if err := dms.Shutdown(); err != nil { t.Fatal(err) } } func TestSanitizeLabels(t *testing.T) { dms := NewDiskMetricStore("", 100*time.Millisecond, nil, logger) // Push mf1c with the grouping matching mf1b, mf1b should end up in storage. ts1 := time.Now() grouping1 := map[string]string{ "job": "job1", "instance": "instance2", } errCh := make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping1, Timestamp: ts1, MetricFamilies: metricFamiliesMap(mf1c), Done: errCh, }) for err := range errCh { t.Fatal("Unexpected error:", err) } pushTimestamp := newPushTimestampGauge(grouping1, ts1) pushFailedTimestamp := newPushFailedTimestampGauge(grouping1, time.Time{}) if err := checkMetricFamilies( dms, mf1b, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } // Push mf1e, missing the instance label. Again, mf1b should end up in storage. 
ts2 := ts1.Add(1) errCh = make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping1, Timestamp: ts2, MetricFamilies: metricFamiliesMap(mf1e), Done: errCh, }) for err := range errCh { t.Fatal("Unexpected error:", err) } pushTimestamp = newPushTimestampGauge(grouping1, ts2) if err := checkMetricFamilies( dms, mf1b, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } // Push mf1e, missing the instance label, into a grouping without the // instance label. The result in the storage should have an empty // instance label. ts3 := ts2.Add(1) grouping3 := map[string]string{ "job": "job1", } errCh = make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping3, Timestamp: ts3, MetricFamilies: metricFamiliesMap(mf1e), Done: errCh, }) for err := range errCh { t.Fatal("Unexpected error:", err) } pushTimestamp.Metric = append( pushTimestamp.Metric, newPushTimestampGauge(grouping3, ts3).Metric[0], ) pushFailedTimestamp.Metric = append( pushFailedTimestamp.Metric, newPushFailedTimestampGauge(grouping3, time.Time{}).Metric[0], ) if err := checkMetricFamilies( dms, mf1be, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } } func TestReplace(t *testing.T) { dms := NewDiskMetricStore("", 100*time.Millisecond, nil, logger) // First do an invalid push to set pushFailedTimestamp and to later // verify that it is retained and not replaced. ts1 := time.Now() grouping1 := map[string]string{ "job": "job1", } errCh := make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping1, Timestamp: ts1, MetricFamilies: metricFamiliesMap(mf1ts), Done: errCh, }) var err error for err = range errCh { if err != errTimestamp { t.Errorf("Expected error %q, got %q.", errTimestamp, err) } } if err == nil { t.Error("Expected error on pushing metric with timestamp.") } pushTimestamp := newPushTimestampGauge(grouping1, time.Time{}) pushFailedTimestamp := newPushFailedTimestampGauge(grouping1, ts1) if err := checkMetricFamilies( dms, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } // Now a valid update in replace mode. It doesn't replace anything, but // it already tests that the push-failed timestamp is retained. ts2 := ts1.Add(time.Second) errCh = make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping1, Timestamp: ts2, MetricFamilies: metricFamiliesMap(mf1a), Done: errCh, Replace: true, }) for err := range errCh { t.Fatal("Unexpected error:", err) } pushTimestamp = newPushTimestampGauge(grouping1, ts2) if err := checkMetricFamilies( dms, mf1a, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } // Now push something else in replace mode that should replace mf1. ts3 := ts2.Add(time.Second) errCh = make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping1, Timestamp: ts3, MetricFamilies: metricFamiliesMap(mf2), Done: errCh, Replace: true, }) for err := range errCh { t.Fatal("Unexpected error:", err) } pushTimestamp = newPushTimestampGauge(grouping1, ts3) if err := checkMetricFamilies( dms, mf2, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } // Another invalid push in replace mode, which should only update the // push-failed timestamp. 
ts4 := ts3.Add(time.Second) errCh = make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping1, Timestamp: ts4, MetricFamilies: metricFamiliesMap(mf1ts), Done: errCh, Replace: true, }) err = nil for err = range errCh { if err != errTimestamp { t.Errorf("Expected error %q, got %q.", errTimestamp, err) } } if err == nil { t.Error("Expected error on pushing metric with timestamp.") } pushFailedTimestamp = newPushFailedTimestampGauge(grouping1, ts4) if err := checkMetricFamilies( dms, mf2, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } // Push an empty map (rather than a nil map) in replace mode. Should // delete everything except the push timestamps. ts5 := ts4.Add(time.Second) errCh = make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: grouping1, Timestamp: ts5, MetricFamilies: metricFamiliesMap(), Done: errCh, Replace: true, }) for err := range errCh { t.Fatal("Unexpected error:", err) } pushTimestamp = newPushTimestampGauge(grouping1, ts5) if err := checkMetricFamilies( dms, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } if err := dms.Shutdown(); err != nil { t.Fatal(err) } } func TestGetMetricFamiliesMap(t *testing.T) { tempDir, err := ioutil.TempDir("", "diskmetricstore.TestGetMetricFamiliesMap.") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempDir) fileName := path.Join(tempDir, "persistence") dms := NewDiskMetricStore(fileName, 100*time.Millisecond, nil, logger) labels1 := map[string]string{ "job": "job1", "instance": "instance1", } labels2 := map[string]string{ "job": "job1", "instance": "instance2", } gk1 := groupingKeyFor(labels1) gk2 := groupingKeyFor(labels2) // Submit a single simple metric family. ts1 := time.Now() errCh := make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: labels1, Timestamp: ts1, MetricFamilies: metricFamiliesMap(mf3), Done: errCh, }) for err := range errCh { t.Fatal("Unexpected error:", err) } pushTimestamp := newPushTimestampGauge(labels1, ts1) pushFailedTimestamp := newPushFailedTimestampGauge(labels1, time.Time{}) if err := checkMetricFamilies( dms, mf3, pushTimestamp, pushFailedTimestamp, ); err != nil { t.Error(err) } // Submit two metric families for a different instance. ts2 := ts1.Add(time.Second) errCh = make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: labels2, Timestamp: ts2, MetricFamilies: metricFamiliesMap(mf1b, mf2), Done: errCh, }) for err := range errCh { t.Fatal("Unexpected error:", err) } // expectedMFMap is a multi-layered map that maps the labelset // fingerprints to the corresponding metric family string // representations. This is for test assertion purposes. 
expectedMFMap := map[string]map[string]string{ gk1: { "mf3": mf3.String(), pushMetricName: pushTimestamp.String(), pushFailedMetricName: pushFailedTimestamp.String(), }, gk2: { "mf1": mf1b.String(), "mf2": mf2.String(), pushMetricName: newPushTimestampGauge(labels2, ts2).String(), pushFailedMetricName: newPushFailedTimestampGauge(labels2, time.Time{}).String(), }, } if err := checkMetricFamilyGroups(dms, expectedMFMap); err != nil { t.Error(err) } } func TestHelpStringFix(t *testing.T) { dms := NewDiskMetricStore("", 100*time.Millisecond, prometheus.DefaultGatherer, logger) ts1 := time.Now() errCh := make(chan error, 1) dms.SubmitWriteRequest(WriteRequest{ Labels: map[string]string{ "job": "job1", }, Timestamp: ts1, MetricFamilies: map[string]*dto.MetricFamily{ "go_goroutines": mfgg, "mf_help": mfh1, }, }) dms.SubmitWriteRequest(WriteRequest{ Labels: map[string]string{ "job": "job2", }, Timestamp: ts1, MetricFamilies: map[string]*dto.MetricFamily{ "mf_help": mfh2, }, Done: errCh, }) for err := range errCh { t.Fatal("Unexpected error:", err) } // Either we have settled on the mfh1 help string or the mfh2 help string. gotMFs := dms.GetMetricFamilies() if len(gotMFs) != 4 { t.Fatalf("expected 4 metric families, got %d", len(gotMFs)) } gotMFsAsStrings := make([]string, len(gotMFs)) for i, mf := range gotMFs { sort.Sort(metricSorter(mf.GetMetric())) gotMFsAsStrings[i] = mf.String() } sort.Strings(gotMFsAsStrings) gotGG := gotMFsAsStrings[0] got12 := gotMFsAsStrings[1] expectedGG := mfggFixed.String() expected12 := mfh12.String() expected21 := mfh21.String() if gotGG != expectedGG { t.Errorf( "help strings weren't properly adjusted, got '%s', expected '%s'", gotGG, expectedGG, ) } if got12 != expected12 && got12 != expected21 { t.Errorf( "help strings weren't properly adjusted, got '%s' which is neither '%s' nor '%s'", got12, expected12, expected21, ) } if err := dms.Shutdown(); err != nil { t.Fatal(err) } } func TestGroupingKeyForLabels(t *testing.T) { sep := string([]byte{model.SeparatorByte}) scenarios := []struct { in map[string]string out string }{ { in: map[string]string{}, out: "", }, { in: map[string]string{"foo": "bar"}, out: "foo" + sep + "bar", }, { in: map[string]string{"foo": "bar", "dings": "bums"}, out: "dings" + sep + "bums" + sep + "foo" + sep + "bar", }, } for _, s := range scenarios { if want, got := s.out, groupingKeyFor(s.in); want != got { t.Errorf("Want grouping key %q for labels %v, got %q.", want, s.in, got) } } } func checkMetricFamilies(dms *DiskMetricStore, expectedMFs ...*dto.MetricFamily) error { gotMFs := dms.GetMetricFamilies() if expected, got := len(expectedMFs), len(gotMFs); expected != got { return fmt.Errorf("expected %d metric families, got %d", expected, got) } expectedMFsAsStrings := make([]string, len(expectedMFs)) for i, mf := range expectedMFs { sort.Sort(metricSorter(mf.Metric)) expectedMFsAsStrings[i] = mf.String() } sort.Strings(expectedMFsAsStrings) gotMFsAsStrings := make([]string, len(gotMFs)) for i, mf := range gotMFs { sort.Sort(metricSorter(mf.GetMetric())) gotMFsAsStrings[i] = mf.String() } sort.Strings(gotMFsAsStrings) for i, got := range gotMFsAsStrings { expected := expectedMFsAsStrings[i] if expected != got { return fmt.Errorf("expected metric family '%s', got '%s'", expected, got) } } return nil } func checkMetricFamilyGroups(dms *DiskMetricStore, expectedMFMap map[string]map[string]string) error { mfMap := dms.GetMetricFamiliesMap() if expected, got := len(expectedMFMap), len(mfMap); expected != got { return fmt.Errorf("expected %d 
metric families in map, but got %d", expected, got) } for k, v := range mfMap { if innerMap, ok := expectedMFMap[k]; ok { if len(innerMap) != len(v.Metrics) { return fmt.Errorf("expected %d metric entries for grouping key %s in map, but got %d", len(innerMap), k, len(v.Metrics)) } for metricName, metricString := range innerMap { if v.Metrics[metricName].GetMetricFamily().String() != metricString { return fmt.Errorf("expected metric %s to be present for key %s", metricString, metricName) } } } else { return fmt.Errorf("expected grouping key %s to be present in metric families map", k) } } return nil } type metricSorter []*dto.Metric func (s metricSorter) Len() int { return len(s) } func (s metricSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s metricSorter) Less(i, j int) bool { for n, lp := range s[i].Label { vi := lp.GetValue() vj := s[j].Label[n].GetValue() if vi != vj { return vi < vj } } return true } prometheus-pushgateway-1.0.0+ds/storage/interface.go000066400000000000000000000164371356076130400226170ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "sort" "time" "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" ) // MetricStore is the interface to the storage layer for metrics. All its // methods must be safe to be called concurrently. type MetricStore interface { // SubmitWriteRequest submits a WriteRequest for processing. There is no // guarantee when a request will be processed, but it is guaranteed that // the requests are processed in the order of submission. SubmitWriteRequest(req WriteRequest) // GetMetricFamilies returns all the currently saved MetricFamilies. The // returned MetricFamilies are guaranteed to not be modified by the // MetricStore anymore. However, they may still be read somewhere else, // so the caller is not allowed to modify the returned MetricFamilies. // If different groups have saved MetricFamilies of the same name, they // are all merged into one MetricFamily by concatenating the contained // Metrics. Inconsistent help strings are logged, and one of the // versions will "win". Inconsistent types and inconsistent or duplicate // label sets will go undetected. GetMetricFamilies() []*dto.MetricFamily // GetMetricFamiliesMap returns a map grouping-key -> MetricGroup. The // MetricFamily pointed to by the Metrics map in each MetricGroup is // guaranteed to not be modified by the MetricStore anymore. However, // they may still be read somewhere else, so the caller is not allowed // to modify it. Otherwise, the returned nested map can be seen as a // deep copy of the internal state of the MetricStore and completely // owned by the caller. GetMetricFamiliesMap() GroupingKeyToMetricGroup // Shutdown must only be called after the caller has made sure that // SubmitWriteRequests is not called anymore. (If it is called later, // the request might get submitted, but not processed anymore.) 
The // Shutdown method waits for the write request queue to empty, then it // persists the content of the MetricStore (if supported by the // implementation). Also, all internal goroutines are stopped. This // method blocks until all of that is complete. If an error is // encountered, it is returned (whereupon the MetricStore is in an // undefined state). If nil is returned, the MetricStore cannot be // "restarted" again, but it can still be used for read operations. Shutdown() error // Healthy returns nil if the MetricStore is currently working as // expected. Otherwise, an error is returned. Healthy() error // Ready returns nil if the MetricStore is ready to be used (all files // are opened and checkpoints have been restored). Otherwise, an error // is returned. Ready() error } // WriteRequest is a request to change the MetricStore, i.e. to process it, a // write lock has to be acquired. // // If MetricFamilies is nil, this is a request to delete metrics that share the // given Labels as a grouping key. Otherwise, this is a request to update the // MetricStore with the MetricFamilies. // // If Replace is true, the MetricFamilies will completely replace the metrics // with the same grouping key. Otherwise, only those MetricFamilies with the // same name as new MetricFamilies will be replaced. // // The key in MetricFamilies is the name of the mapped metric family. // // When the WriteRequest is processed, the metrics in MetricFamilies will be // sanitized to have the same job and other labels as those in the Labels // field. Also, if there is no instance label, an instance label with an empty // value will be set. This implies that the MetricFamilies in the WriteRequest // may be modified by the MetricStore during processing of the WriteRequest! // // The Timestamp field marks the time the request was received from the // network. It is not related to the TimestampMs field in the Metric proto // message. In fact, WriteRequests containing any Metrics with a TimestampMs set // are invalid and will be rejected. // // The Done channel may be nil. If it is not nil, it will be closed once the // write request is processed. Any errors occurring during processing are sent to // the channel before closing it. type WriteRequest struct { Labels map[string]string Timestamp time.Time MetricFamilies map[string]*dto.MetricFamily Replace bool Done chan error } // GroupingKeyToMetricGroup is the first level of the metric store, keyed by // grouping key. type GroupingKeyToMetricGroup map[string]MetricGroup // MetricGroup adds the grouping labels to a NameToTimestampedMetricFamilyMap. type MetricGroup struct { Labels map[string]string Metrics NameToTimestampedMetricFamilyMap } // SortedLabels returns the label names of the grouping labels sorted // lexicographically but with the "job" label always first. This method exists // for presentation purposes, see template.html. func (mg MetricGroup) SortedLabels() []string { lns := make([]string, 1, len(mg.Labels)) lns[0] = "job" for ln := range mg.Labels { if ln != "job" { lns = append(lns, ln) } } sort.Strings(lns[1:]) return lns } // LastPushSuccess returns false if the automatically added metric for the // timestamp of the last failed push has a value larger than the value of the // automatically added metric for the timestamp of the last successful push. In // all other cases, it returns true (including the case that one or both of // those metrics are missing for some reason.)
func (mg MetricGroup) LastPushSuccess() bool { fail := mg.Metrics[pushFailedMetricName].GobbableMetricFamily if fail == nil { return true } success := mg.Metrics[pushMetricName].GobbableMetricFamily if success == nil { return true } return (*dto.MetricFamily)(fail).GetMetric()[0].GetGauge().GetValue() <= (*dto.MetricFamily)(success).GetMetric()[0].GetGauge().GetValue() } // NameToTimestampedMetricFamilyMap is the second level of the metric store, // keyed by metric name. type NameToTimestampedMetricFamilyMap map[string]TimestampedMetricFamily // TimestampedMetricFamily adds the push timestamp to a gobbable version of the // MetricFamily-DTO. type TimestampedMetricFamily struct { Timestamp time.Time GobbableMetricFamily *GobbableMetricFamily } // GetMetricFamily returns the normal GetMetricFamily DTO (without the gob additions). func (tmf TimestampedMetricFamily) GetMetricFamily() *dto.MetricFamily { return (*dto.MetricFamily)(tmf.GobbableMetricFamily) } // GobbableMetricFamily is a dto.MetricFamily that implements GobDecoder and // GobEncoder. type GobbableMetricFamily dto.MetricFamily // GobDecode implements gob.GobDecoder. func (gmf *GobbableMetricFamily) GobDecode(b []byte) error { return proto.Unmarshal(b, (*dto.MetricFamily)(gmf)) } // GobEncode implements gob.GobEncoder. func (gmf *GobbableMetricFamily) GobEncode() ([]byte, error) { return proto.Marshal((*dto.MetricFamily)(gmf)) } prometheus-pushgateway-1.0.0+ds/vendor/000077500000000000000000000000001356076130400201465ustar00rootroot00000000000000prometheus-pushgateway-1.0.0+ds/vendor/modules.txt000066400000000000000000000040631356076130400223620ustar00rootroot00000000000000# github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 github.com/alecthomas/template github.com/alecthomas/template/parse # github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d github.com/alecthomas/units # github.com/beorn7/perks v1.0.1 github.com/beorn7/perks/quantile # github.com/cespare/xxhash/v2 v2.1.0 github.com/cespare/xxhash/v2 # github.com/go-kit/kit v0.9.0 github.com/go-kit/kit/log github.com/go-kit/kit/log/level # github.com/go-logfmt/logfmt v0.4.0 github.com/go-logfmt/logfmt # github.com/golang/protobuf v1.3.2 github.com/golang/protobuf/proto # github.com/julienschmidt/httprouter v1.3.0 github.com/julienschmidt/httprouter # github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 github.com/kr/logfmt # github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/pkg/errors v0.8.1 github.com/pkg/errors # github.com/prometheus/client_golang v1.2.0 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promauto github.com/prometheus/client_golang/prometheus/promhttp # github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 github.com/prometheus/client_model/go # github.com/prometheus/common v0.7.0 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model github.com/prometheus/common/promlog github.com/prometheus/common/promlog/flag github.com/prometheus/common/server github.com/prometheus/common/version # github.com/prometheus/procfs v0.0.5 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util # github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/shurcooL/httpfs/vfsutil # github.com/shurcooL/vfsgen 
v0.0.0-20181202132449-6a9ea43bcacd github.com/shurcooL/vfsgen # golang.org/x/sys v0.0.0-20191010194322-b09406accb47 golang.org/x/sys/windows # gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/alecthomas/kingpin.v2
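The MetricStore/WriteRequest contract documented in storage/interface.go above (SubmitWriteRequest queues the request, the Done channel receives any processing errors and is then closed, Shutdown drains the queue) can be exercised from caller code. The following is a minimal sketch, assuming the constructor arguments used in the storage tests (persistence file name, persistence interval, optional prometheus.Gatherer, go-kit logger); the metric family "some_metric" and job "example_job" are made-up illustrations, not part of the package.

package main

import (
	"fmt"
	"time"

	"github.com/go-kit/kit/log"
	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"

	"github.com/prometheus/pushgateway/storage"
)

func main() {
	// An empty persistence file name keeps the store purely in memory,
	// as exercised by TestNoPersistence.
	dms := storage.NewDiskMetricStore("", 100*time.Millisecond, nil, log.NewNopLogger())

	// A hypothetical metric family; note that no TimestampMs is set,
	// since pushed metrics with timestamps are rejected.
	mf := &dto.MetricFamily{
		Name: proto.String("some_metric"),
		Type: dto.MetricType_UNTYPED.Enum(),
		Metric: []*dto.Metric{
			{Untyped: &dto.Untyped{Value: proto.Float64(42)}},
		},
	}

	done := make(chan error, 1)
	dms.SubmitWriteRequest(storage.WriteRequest{
		Labels:         map[string]string{"job": "example_job"},
		Timestamp:      time.Now(),
		MetricFamilies: map[string]*dto.MetricFamily{"some_metric": mf},
		Done:           done,
	})
	// Done is closed once the request has been processed; any errors are
	// sent on the channel before it is closed.
	for err := range done {
		fmt.Println("push rejected:", err)
	}

	// Shutdown waits for the write request queue to drain.
	if err := dms.Shutdown(); err != nil {
		fmt.Println("shutdown failed:", err)
	}
}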
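TestGroupingKeyForLabels above pins down the grouping-key layout: label names are sorted, and names and values are concatenated with model.SeparatorByte as the separator. A small stand-alone sketch of that layout follows; buildGroupingKey is a local illustration written against the test's expectations, not the package's internal groupingKeyFor helper.

package main

import (
	"fmt"
	"sort"
	"strings"

	"github.com/prometheus/common/model"
)

// buildGroupingKey joins the sorted label names and their values with
// model.SeparatorByte, matching the expectations in TestGroupingKeyForLabels.
func buildGroupingKey(labels map[string]string) string {
	names := make([]string, 0, len(labels))
	for name := range labels {
		names = append(names, name)
	}
	sort.Strings(names)

	sep := string([]byte{model.SeparatorByte})
	parts := make([]string, 0, 2*len(labels))
	for _, name := range names {
		parts = append(parts, name, labels[name])
	}
	return strings.Join(parts, sep)
}

func main() {
	key := buildGroupingKey(map[string]string{"job": "job1", "instance": "instance2"})
	// Prints "instance\xffinstance2\xffjob\xffjob1".
	fmt.Printf("%q\n", key)
}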