pax_global_header00006660000000000000000000000064141703302100014501gustar00rootroot0000000000000052 comment=6cff384d7433bcb1104efe3b496cd27c0658eb09
prometheus-postgres-exporter-0.10.1/000077500000000000000000000000001417033021000174655ustar00rootroot00000000000000
prometheus-postgres-exporter-0.10.1/.circleci/000077500000000000000000000000001417033021000213205ustar00rootroot00000000000000
prometheus-postgres-exporter-0.10.1/.circleci/config.yml000066400000000000000000000036531417033021000233170ustar00rootroot00000000000000---
version: 2.1

orbs:
  prometheus: prometheus/prometheus@0.15.0

executors:
  # This must match .promu.yml.
  golang:
    docker:
      - image: circleci/golang:1.17

jobs:
  test:
    executor: golang
    steps:
      - prometheus/setup_environment
      - run: make
      - prometheus/store_artifact:
          file: postgres_exporter
  integration:
    docker:
      - image: circleci/golang:1.17
      - image: << parameters.postgres_image >>
        environment:
          POSTGRES_DB: circle_test
          POSTGRES_USER: postgres
          POSTGRES_PASSWORD: test
    parameters:
      postgres_image:
        type: string
    environment:
      DATA_SOURCE_NAME: 'postgresql://postgres:test@localhost:5432/circle_test?sslmode=disable'
      GOOPTS: '-v -tags integration'
    steps:
      - checkout
      - setup_remote_docker
      - run: docker version
      - run: make build
      - run: make test

workflows:
  version: 2
  postgres_exporter:
    jobs:
      - test:
          filters:
            tags:
              only: /.*/
      - integration:
          matrix:
            parameters:
              postgres_image:
                - circleci/postgres:10
                - circleci/postgres:11
                - circleci/postgres:12
                - circleci/postgres:13
                - cimg/postgres:14.1
      - prometheus/build:
          name: build
          filters:
            tags:
              only: /.*/
      - prometheus/publish_master:
          context: org-context
          docker_hub_organization: prometheuscommunity
          quay_io_organization: prometheuscommunity
          requires:
            - test
            - build
          filters:
            branches:
              only: master
      - prometheus/publish_release:
          context: org-context
          docker_hub_organization: prometheuscommunity
          quay_io_organization: prometheuscommunity
          requires:
            - test
            - build
          filters:
            tags:
              only: /^v.*/
            branches:
              ignore: /.*/
prometheus-postgres-exporter-0.10.1/.github/000077500000000000000000000000001417033021000210255ustar00rootroot00000000000000
prometheus-postgres-exporter-0.10.1/.github/ISSUE_TEMPLATE/000077500000000000000000000000001417033021000232105ustar00rootroot00000000000000
prometheus-postgres-exporter-0.10.1/.github/ISSUE_TEMPLATE/bug_report.md000066400000000000000000000013441417033021000257040ustar00rootroot00000000000000---
name: Bug report
about: Create a report to help us improve.
title: ''
assignees: ''
---

**What did you do?**

**What did you expect to see?**

**What did you see instead? Under which circumstances?**

**Environment**

* System information: insert output of `uname -srm` here
* postgres_exporter version: insert output of `postgres_exporter --version` here
* postgres_exporter flags:
```
insert list of flags used here
```
* PostgreSQL version: insert PostgreSQL version here
* Logs:
```
insert logs relevant to the issue here
```
prometheus-postgres-exporter-0.10.1/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000003041417033021000251750ustar00rootroot00000000000000blank_issues_enabled: false
contact_links:
  - name: Prometheus community support
    url: https://prometheus.io/community/
    about: List of communication channels for the Prometheus community.
prometheus-postgres-exporter-0.10.1/.github/ISSUE_TEMPLATE/feature_request.md000066400000000000000000000006151417033021000267370ustar00rootroot00000000000000---
name: Feature request
about: Suggest an idea for this project.
title: ''
labels: ''
assignees: ''
---

## Proposal

**Use case. 
Why is this important?**

*“Nice to have” is not a good use case. :)*
prometheus-postgres-exporter-0.10.1/.gitignore000066400000000000000000000004341417033021000214560ustar00rootroot00000000000000/.build
/postgres_exporter
/postgres_exporter_integration_test
*.tar.gz
*.test
*-stamp
/.idea
/.vscode
*.iml
/cover.out
/cover.*.out
/.coverage
/bin
/release
/*.prom
/.metrics.*.*.prom
/.metrics.*.*.prom.unique
/.assets-branch
/.metrics.*.added
/.metrics.*.removed
/tools/src
/vendor
prometheus-postgres-exporter-0.10.1/.golangci.yml000066400000000000000000000002261417033021000220510ustar00rootroot00000000000000---
issues:
  exclude-rules:
    - path: _test.go
      linters:
        - errcheck
linters-settings:
  errcheck:
    exclude: scripts/errcheck_excludes.txt
prometheus-postgres-exporter-0.10.1/.promu.yml000066400000000000000000000013101417033021000214230ustar00rootroot00000000000000go:
  # This must match .circle/config.yml.
  version: 1.17
repository:
  path: github.com/prometheus-community/postgres_exporter
build:
  binaries:
    - name: postgres_exporter
      path: ./cmd/postgres_exporter
  flags: -a -tags 'netgo static_build'
  ldflags: |
    -X github.com/prometheus/common/version.Version={{.Version}}
    -X github.com/prometheus/common/version.Revision={{.Revision}}
    -X github.com/prometheus/common/version.Branch={{.Branch}}
    -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}}
    -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}}
tarball:
  files:
    - LICENSE
    - NOTICE
prometheus-postgres-exporter-0.10.1/CHANGELOG.md000066400000000000000000000171251417033021000213040ustar00rootroot00000000000000## 0.10.1 / 2022-01-14

* [BUGFIX] Fix broken log-level for values other than debug. #560

## 0.10.0 / 2021-07-08

* [ENHANCEMENT] Add ability to set included databases when autoDiscoverDatabases is enabled #499
* [BUGFIX] Fix pg_replication_slots on PostgreSQL versions 9.4 <> 10.0 #537

## 0.9.0 / 2021-03-01

First release under the Prometheus Community organisation.

* [CHANGE] Update build to use standard Prometheus promu/Dockerfile
* [ENHANCEMENT] Remove duplicate column in queries.yml #433
* [ENHANCEMENT] Add query for 'pg_replication_slots' #465
* [ENHANCEMENT] Allow a custom prefix for metric namespace #387
* [ENHANCEMENT] Improve PostgreSQL replication lag detection #395
* [ENHANCEMENT] Support connstring syntax when discovering databases #473
* [ENHANCEMENT] Detect SIReadLock locks in the pg_locks metric #421
* [BUGFIX] Fix pg_database_size_bytes metric in queries.yaml #357
* [BUGFIX] Don't ignore errors in parseUserQueries #362
* [BUGFIX] Fix queries.yaml for AWS RDS #370
* [BUGFIX] Recover when connection cannot be established at startup #415
* [BUGFIX] Don't retry if an error occurs #426
* [BUGFIX] Do not panic on incorrect env #457

## 0.8.0 / 2019-11-25

* Add a build info metric (#323)
* Re-add pg_stat_bgwriter metrics which were accidentally removed in the previous version. (resolves #336)
* Export pg_stat_archiver metrics (#324)
* Add support for 'DATA_SOURCE_URI_FILE' envvar.
* Resolve #329
* Added a new field "master" to queries.yaml. (credit to @sfalkon)
  - If "master" is true, the query will be run on only one database per instance.
* Changed queries.yaml to work with the autoDiscoverDatabases option. (credit to @sfalkon)
  - Added the current database name to metrics, because any database in a cluster may have the same table names.
  - Added the "master" field for instance-level query metrics.

## 0.7.0 / 2019-11-01

Introduces some more significant changes, hence the minor version bump in such a short time frame.

* Rename pg_database_size to pg_database_size_bytes in queries.yml.
* Add pg_stat_statements to sample queries.yml file.
* Add support for optional namespace caching. (#319)
* Fix some autodiscovery problems (#314) (resolves #308)
* YAML parsing refactor (#299)
* Don't stop generating fingerprint while encountering a value with an "=" sign (#318) (may resolve problems with passwords and special characters).

## 0.6.0 / 2019-10-30

* Add SQL for grant connect (#303)
* Expose pg_current_wal_lsn_bytes (#307)
* [minor] Fix landing page content-type (#305)
* Updated lib/pq driver to 1.2.0 in order to support stronger SCRAM-SHA-256 authentication. This drops support for Go < 1.11 and PostgreSQL < 9.4. (#304)
* Provide more helpful default values for tables that have never been vacuumed (#310)
* Add retries to getServer() (#316)
* Fix pg_up metric returning the last calculated value without explicit resetting (#291)
* Discover only databases that are not templates and allow connections (#297)
* Add --exclude-databases option (#298)

## 0.5.1 / 2019-07-09

* Add application_name as a label for pg_stat_replication metrics (#285).

## 0.5.0 / 2019-07-03

It's been far too long since I've done a release and we have a lot of accumulated changes.

* Docker image now runs as a non-root user named "postgres_exporter"
* Add `--auto-discover-databases` option, which automatically discovers and scrapes all databases.
* Add support for boolean data types as metrics
* Replication lag is now expressed as a float and not truncated to an integer.
* When default metrics are disabled, no version metrics are collected anymore either.
* BUGFIX: Fix exporter panic when postgres server goes down.
* Add support for collecting metrics from multiple servers.
* PostgreSQL 11 is now supported in the integration tests.

## 0.4.7 / 2018-10-02

* Added a query for v9.1 pg_stat_activity.
* Add `--constantLabels` flag to allow applying fixed constant labels to metrics.
* queries.yml: Add pg_statio_user_tables.
* Support 'B' suffix in units.

## 0.4.6 / 2018-04-15

* Fix issue #173 - 32 and 64mb unit sizes were not supported in pg_settings.

## 0.4.5 / 2018-02-27

* Add commandline flag to disable default metrics (thanks @hsun-cnnxty)

## 0.4.4 / 2018-03-21

* Bugfix for 0.4.3 which broke pg_up (it would always be 0).
* pg_up is now refreshed based on database Ping() every scrape.
* Re-release of 0.4.4 to fix version numbering.

## 0.4.2 / 2018-02-19

* Adds the following environment variables for overriding defaults:
  * `PG_EXPORTER_WEB_LISTEN_ADDRESS`
  * `PG_EXPORTER_WEB_TELEMETRY_PATH`
  * `PG_EXPORTER_EXTEND_QUERY_PATH`
* Add Content-Type to HTTP landing page.
* Fix Makefile to produce .exe binaries for Windows.

## 0.4.1 / 2017-11-30

* No code changes to v0.4.0 for the exporter.
* First release switching to tar-file based distribution.
* First release with Windows and Darwin cross-builds.

## 0.4.0 / 2017-11-29

* Fix panic due to inconsistent label cardinality when using queries.yaml with queries which return extra columns.
* Add metric for whether the user queries YAML file parsed correctly. This also includes the filename and SHA256 sum allowing tracking of updates.
* Add pg_up metric to indicate whether the exporter was able to connect and Ping() the PG instance before a scrape.
* Fix broken link in landing page for `/metrics`

## 0.3.0 / 2017-10-23

* Add support for PostgreSQL 10.

## 0.2.3 / 2017-09-07

* Add support for the 16kB unit when decoding pg_settings. (#101)

## 0.2.2 / 2017-08-04

* Fix DSN logging. 
The exporter previously never actually logged the DSN when database connections failed. This was also masking a logic error which could potentially lead to a crash when DSN was unparseable, though no actual crash could be produced in testing. ## 0.2.1 / 2017-06-07 * Ignore functions that cannot be executed during replication recovery (#52) * Add a `-version` flag finally. * Add confirmed_flush_lsn to pg_stat_replication. ## 0.2.0 / 2017-04-18 * Major change - use pg_settings to retrieve runtime variables. Adds >180 new metrics and descriptions (big thanks to Matt Bostock for this work). Removes the following metrics: ``` pg_runtime_variable_max_connections pg_runtime_variable_max_files_per_process pg_runtime_variable_max_function_args pg_runtime_variable_max_identifier_length pg_runtime_variable_max_index_keys pg_runtime_variable_max_locks_per_transaction pg_runtime_variable_max_pred_locks_per_transaction pg_runtime_variable_max_prepared_transactions pg_runtime_variable_max_standby_archive_delay_milliseconds pg_runtime_variable_max_standby_streaming_delay_milliseconds pg_runtime_variable_max_wal_senders ``` They are replaced by equivalent names under `pg_settings` with the exception of ``` pg_runtime_variable_max_standby_archive_delay_milliseconds pg_runtime_variable_max_standby_streaming_delay_milliseconds ``` which are replaced with ``` pg_settings_max_standby_archive_delay_seconds pg_settings_max_standby_streaming_delay_seconds ``` ## 0.1.3 / 2017-02-21 * Update the Go build to 1.7.5 to include a fix for NAT handling. * Fix passwords leaking in DB url error message on connection failure. ## 0.1.2 / 2017-02-07 * Use a connection pool of size 1 to reduce memory churn on target database. ## 0.1.1 / 2016-11-29 * Fix pg_stat_replication metrics not being collected due to semantic version filter problem. ## 0.1.0 / 2016-11-21 * Change default port to 9187. * Fix regressions with pg_stat_replication on older versions of Postgres. * Add pg_static metric to store version strings as labels. * Much more thorough testing structure. * Move to semantic versioning for releases and docker image publications. ## 0.0.1 / 2016-06-03 Initial release for publication. prometheus-postgres-exporter-0.10.1/CODE_OF_CONDUCT.md000066400000000000000000000002331417033021000222620ustar00rootroot00000000000000## Prometheus Community Code of Conduct Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). prometheus-postgres-exporter-0.10.1/Dockerfile000066400000000000000000000005271417033021000214630ustar00rootroot00000000000000ARG ARCH="amd64" ARG OS="linux" FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest LABEL maintainer="The Prometheus Authors " ARG ARCH="amd64" ARG OS="linux" COPY .build/${OS}-${ARCH}/postgres_exporter /bin/postgres_exporter EXPOSE 9187 USER nobody ENTRYPOINT [ "/bin/postgres_exporter" ] prometheus-postgres-exporter-0.10.1/LICENSE000066400000000000000000000261351417033021000205010ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
prometheus-postgres-exporter-0.10.1/MAINTAINERS.md000066400000000000000000000002141417033021000215560ustar00rootroot00000000000000* Ben Kochie @SuperQ * William Rouesnel @wrouesnel * Joe Adams @sysadmind prometheus-postgres-exporter-0.10.1/Makefile000066400000000000000000000005221417033021000211240ustar00rootroot00000000000000# Ensure that 'all' is the default target otherwise it will be the first target from Makefile.common. all:: # Needs to be defined before including Makefile.common to auto-generate targets DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le DOCKER_REPO ?= prometheuscommunity include Makefile.common DOCKER_IMAGE_NAME ?= postgres-exporter prometheus-postgres-exporter-0.10.1/Makefile.common000066400000000000000000000244531417033021000224240ustar00rootroot00000000000000# Copyright 2018 The Prometheus Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A common Makefile that includes rules to be reused in different prometheus projects. # !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! # Example usage : # Create the main Makefile in the root project directory. # include Makefile.common # customTarget: # @echo ">> Running customTarget" # # Ensure GOBIN is not set during build so that promu is installed to the correct path unexport GOBIN GO ?= go GOFMT ?= $(GO)fmt FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) GOOPTS ?= GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) GO_VERSION ?= $(shell $(GO) version) GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') GOVENDOR := GO111MODULE := ifeq (, $(PRE_GO_111)) ifneq (,$(wildcard go.mod)) # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). GO111MODULE := on ifneq (,$(wildcard vendor)) # Always use the local vendor/ directory to satisfy the dependencies. GOOPTS := $(GOOPTS) -mod=vendor endif endif else ifneq (,$(wildcard go.mod)) ifneq (,$(wildcard vendor)) $(warning This repository requires Go >= 1.11 because of Go modules) $(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') endif else # This repository isn't using Go modules (yet). GOVENDOR := $(FIRST_GOPATH)/bin/govendor endif endif PROMU := $(FIRST_GOPATH)/bin/promu pkgs = ./... 
ifeq (arm, $(GOHOSTARCH)) GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) else GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) endif GOTEST := $(GO) test GOTEST_DIR := ifneq ($(CIRCLE_JOB),) ifneq ($(shell which gotestsum),) GOTEST_DIR := test-results GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- endif endif PROMU_VERSION ?= 0.13.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= GOLANGCI_LINT_VERSION ?= v1.42.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) # If we're in CI and there is an Actions file, that means the linter # is being run in Actions, so we don't need to run it here. ifeq (,$(CIRCLE_JOB)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint endif endif endif PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) DOCKERFILE_PATH ?= ./Dockerfile DOCKERBUILD_CONTEXT ?= ./ DOCKER_REPO ?= prom DOCKER_ARCHS ?= amd64 BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) ifeq ($(GOHOSTARCH),amd64) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) # Only supported on amd64 test-flags := -race endif endif # This rule is used to forward a target like "build" to "common-build". This # allows a new "build" target to be defined in a Makefile which includes this # one and override "common-build" without override warnings. %: common-% ; .PHONY: common-all common-all: precheck style check_license lint yamllint unused build test .PHONY: common-style common-style: @echo ">> checking code style" @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ if [ -n "$${fmtRes}" ]; then \ echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ echo "Please ensure you are using $$($(GO) version) for formatting code."; \ exit 1; \ fi .PHONY: common-check_license common-check_license: @echo ">> checking license header" @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ done); \ if [ -n "$${licRes}" ]; then \ echo "license header checking failed:"; echo "$${licRes}"; \ exit 1; \ fi .PHONY: common-deps common-deps: @echo ">> getting dependencies" ifdef GO111MODULE GO111MODULE=$(GO111MODULE) $(GO) mod download else $(GO) get $(GOOPTS) -t ./... 
endif .PHONY: update-go-deps update-go-deps: @echo ">> updating Go dependencies" @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ $(GO) get -d $$m; \ done GO111MODULE=$(GO111MODULE) $(GO) mod tidy ifneq (,$(wildcard vendor)) GO111MODULE=$(GO111MODULE) $(GO) mod vendor endif .PHONY: common-test-short common-test-short: $(GOTEST_DIR) @echo ">> running short tests" GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs) .PHONY: common-test common-test: $(GOTEST_DIR) @echo ">> running all tests" GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) $(GOTEST_DIR): @mkdir -p $@ .PHONY: common-format common-format: @echo ">> formatting code" GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) .PHONY: common-vet common-vet: @echo ">> vetting code" GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) .PHONY: common-lint common-lint: $(GOLANGCI_LINT) ifdef GOLANGCI_LINT @echo ">> running golangci-lint" ifdef GO111MODULE # 'go list' needs to be executed before staticcheck to prepopulate the modules cache. # Otherwise staticcheck might fail randomly for some reason not yet explained. GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) else $(GOLANGCI_LINT) run $(pkgs) endif endif .PHONY: common-yamllint common-yamllint: @echo ">> running yamllint on all YAML files in the repository" ifeq (, $(shell which yamllint)) @echo "yamllint not installed so skipping" else yamllint . endif # For backward-compatibility. .PHONY: common-staticcheck common-staticcheck: lint .PHONY: common-unused common-unused: $(GOVENDOR) ifdef GOVENDOR @echo ">> running check for unused packages" @$(GOVENDOR) list +unused | grep . 
&& exit 1 || echo 'No unused packages' else ifdef GO111MODULE @echo ">> running check for unused/missing packages in go.mod" GO111MODULE=$(GO111MODULE) $(GO) mod tidy ifeq (,$(wildcard vendor)) @git diff --exit-code -- go.sum go.mod else @echo ">> running check for unused packages in vendor/" GO111MODULE=$(GO111MODULE) $(GO) mod vendor @git diff --exit-code -- go.sum go.mod vendor/ endif endif endif .PHONY: common-build common-build: promu @echo ">> building binaries" GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) .PHONY: common-tarball common-tarball: promu @echo ">> building release tarball" $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ -f $(DOCKERFILE_PATH) \ --build-arg ARCH="$*" \ --build-arg OS="linux" \ $(DOCKERBUILD_CONTEXT) .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS) $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) common-docker-tag-latest: $(TAG_DOCKER_ARCHS) $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" .PHONY: common-docker-manifest common-docker-manifest: DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .PHONY: promu promu: $(PROMU) $(PROMU): $(eval PROMU_TMP := $(shell mktemp -d)) curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) mkdir -p $(FIRST_GOPATH)/bin cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu rm -r $(PROMU_TMP) .PHONY: proto proto: @echo ">> generating code from proto files" @./scripts/genproto.sh ifdef GOLANGCI_LINT $(GOLANGCI_LINT): mkdir -p $(FIRST_GOPATH)/bin curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ | sed -e '/install -d/d' \ | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) endif ifdef GOVENDOR .PHONY: $(GOVENDOR) $(GOVENDOR): GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor endif .PHONY: precheck precheck:: define PRECHECK_COMMAND_template = precheck:: $(1)_precheck PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) .PHONY: $(1)_precheck $(1)_precheck: @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. 
Is $(1) installed?"; \
		exit 1; \
	fi
endef
prometheus-postgres-exporter-0.10.1/NOTICE000066400000000000000000000001061417033021000203660ustar00rootroot00000000000000Copyright 2018 William Rouesnel
Copyright 2021 The Prometheus Authors
prometheus-postgres-exporter-0.10.1/README-RDS.md000066400000000000000000000030721417033021000213740ustar00rootroot00000000000000# Using Postgres-Exporter with AWS:RDS

### When using postgres-exporter with Amazon Web Services' RDS, the rolname "rdsadmin" and datname "rdsadmin" must be excluded.

I had success running the Docker container `quay.io/prometheuscommunity/postgres-exporter:latest` with queries.yaml as the `PG_EXPORTER_EXTEND_QUERY_PATH`. The errors mentioned in issue #335 appeared, and I had to modify the 'pg_stat_statements' query with the following: `WHERE t2.rolname != 'rdsadmin'`

Running postgres-exporter in a container like so:
```
DBNAME='postgres'
PGUSER='postgres'
PGPASS='psqlpasswd123'
PGHOST='name.blahblah.us-east-1.rds.amazonaws.com'
docker run --rm --detach \
  --name "postgresql_exporter_rds" \
  --publish 9187:9187 \
  --volume=/etc/prometheus/postgresql-exporter/queries.yaml:/var/lib/postgresql/queries.yaml \
  -e DATA_SOURCE_NAME="postgresql://${PGUSER}:${PGPASS}@${PGHOST}:5432/${DBNAME}?sslmode=disable" \
  -e PG_EXPORTER_EXCLUDE_DATABASES=rdsadmin \
  -e PG_EXPORTER_DISABLE_DEFAULT_METRICS=true \
  -e PG_EXPORTER_DISABLE_SETTINGS_METRICS=true \
  -e PG_EXPORTER_EXTEND_QUERY_PATH='/var/lib/postgresql/queries.yaml' \
  quay.io/prometheuscommunity/postgres-exporter
```

### Expected changes to RDS:
+ See the Stack Overflow notes (https://stackoverflow.com/questions/43926499/amazon-postgres-rds-pg-stat-statements-not-loaded#43931885)
+ You must also use a specific RDS parameter_group that includes the following:
```
shared_preload_libraries = "pg_stat_statements,pg_hint_plan"
```
+ Lastly, you must reboot the RDS instance.
prometheus-postgres-exporter-0.10.1/README.md000066400000000000000000000273131417033021000207520ustar00rootroot00000000000000[![Build Status](https://circleci.com/gh/prometheus-community/postgres_exporter.svg?style=svg)](https://circleci.com/gh/prometheus-community/postgres_exporter)
[![Coverage Status](https://coveralls.io/repos/github/prometheus-community/postgres_exporter/badge.svg?branch=master)](https://coveralls.io/github/prometheus-community/postgres_exporter?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus-community/postgres_exporter)](https://goreportcard.com/report/github.com/prometheus-community/postgres_exporter)
[![Docker Pulls](https://img.shields.io/docker/pulls/prometheuscommunity/postgres-exporter.svg)](https://hub.docker.com/r/prometheuscommunity/postgres-exporter/tags)

# PostgreSQL Server Exporter

Prometheus exporter for PostgreSQL server metrics.
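To collect these metrics, point a Prometheus server at the exporter. A minimal scrape configuration could look like the following (a sketch assuming the exporter runs at its default `localhost:9187`; the job name is arbitrary):

```yaml
scrape_configs:
  - job_name: "postgres_exporter"
    static_configs:
      - targets: ["localhost:9187"]
```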
CI Tested PostgreSQL versions: `9.4`, `9.5`, `9.6`, `10`, `11`, `12`, `13`

## Quick Start
This package is available for Docker:
```
# Start an example database
docker run --net=host -it --rm -e POSTGRES_PASSWORD=password postgres
# Connect to it
docker run \
  --net=host \
  -e DATA_SOURCE_NAME="postgresql://postgres:password@localhost:5432/postgres?sslmode=disable" \
  quay.io/prometheuscommunity/postgres-exporter
```

## Building and running

    git clone https://github.com/prometheus-community/postgres_exporter.git
    cd postgres_exporter
    make build
    ./postgres_exporter <flags>

To build the Docker image:

    make promu
    promu crossbuild -p linux/amd64 -p linux/armv7 -p linux/arm64 -p linux/ppc64le
    make docker

This will build the docker image as `prometheuscommunity/postgres_exporter:${branch}`.

### Flags

* `help` Show context-sensitive help (also try --help-long and --help-man).
* `web.listen-address` Address to listen on for web interface and telemetry. Default is `:9187`.
* `web.telemetry-path` Path under which to expose metrics. Default is `/metrics`.
* `disable-default-metrics` Use only metrics supplied from `queries.yaml` via `--extend.query-path`.
* `disable-settings-metrics` Use the flag if you don't want to scrape `pg_settings`.
* `auto-discover-databases` Whether to discover the databases on a server dynamically.
* `extend.query-path` Path to a YAML file containing custom queries to run. Check out [`queries.yaml`](queries.yaml) for examples of the format.
* `dumpmaps` Do not run - print the internal representation of the metric maps. Useful when debugging a custom queries file.
* `constantLabels` Labels to set in all metrics. A list of `label=value` pairs, separated by commas.
* `version` Show application version.
* `exclude-databases` A list of databases to remove when autoDiscoverDatabases is enabled.
* `include-databases` A list of databases to only include when autoDiscoverDatabases is enabled.
* `log.level` Set logging level: one of `debug`, `info`, `warn`, `error`.
* `log.format` Set the log format: one of `logfmt`, `json`.
* `web.config.file` Configuration file to use TLS and/or basic authentication. The format of the file is described [in the exporter-toolkit repository](https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md).

### Environment Variables

The following environment variables configure the exporter:

* `DATA_SOURCE_NAME` the default legacy format. Accepts URI form and key=value form arguments. The URI may contain the username and password to connect with.
* `DATA_SOURCE_URI` an alternative to `DATA_SOURCE_NAME` which exclusively accepts the hostname without a username and password component. For example, `my_pg_hostname` or `my_pg_hostname?sslmode=disable`.
* `DATA_SOURCE_URI_FILE` The same as above but reads the URI from a file.
* `DATA_SOURCE_USER` When using `DATA_SOURCE_URI`, this environment variable is used to specify the username.
* `DATA_SOURCE_USER_FILE` The same, but reads the username from a file.
* `DATA_SOURCE_PASS` When using `DATA_SOURCE_URI`, this environment variable is used to specify the password to connect with.
* `DATA_SOURCE_PASS_FILE` The same as above but reads the password from a file.
* `PG_EXPORTER_WEB_LISTEN_ADDRESS` Address to listen on for web interface and telemetry. Default is `:9187`.
* `PG_EXPORTER_WEB_TELEMETRY_PATH` Path under which to expose metrics. Default is `/metrics`.
* `PG_EXPORTER_DISABLE_DEFAULT_METRICS` Use only metrics supplied from `queries.yaml`. Value can be `true` or `false`. Default is `false`.
* `PG_EXPORTER_DISABLE_SETTINGS_METRICS` Use the flag if you don't want to scrape `pg_settings`. Value can be `true` or `false`. Default is `false`.
* `PG_EXPORTER_AUTO_DISCOVER_DATABASES` Whether to discover the databases on a server dynamically. Value can be `true` or `false`. Default is `false`.
* `PG_EXPORTER_EXTEND_QUERY_PATH` Path to a YAML file containing custom queries to run. Check out [`queries.yaml`](queries.yaml) for examples of the format.
* `PG_EXPORTER_CONSTANT_LABELS` Labels to set in all metrics. A list of `label=value` pairs, separated by commas.
* `PG_EXPORTER_EXCLUDE_DATABASES` A comma-separated list of databases to remove when autoDiscoverDatabases is enabled. Default is an empty string.
* `PG_EXPORTER_INCLUDE_DATABASES` A comma-separated list of databases to only include when autoDiscoverDatabases is enabled. Default is an empty string, which allows all databases.
* `PG_EXPORTER_METRIC_PREFIX` A prefix to use for each of the default metrics exported by postgres-exporter. Default is `pg`.

Settings set by environment variables starting with `PG_` will be overwritten by the corresponding CLI flag if given.

### Setting the Postgres server's data source name

The PostgreSQL server's [data source name](http://en.wikipedia.org/wiki/Data_source_name) must be set via the `DATA_SOURCE_NAME` environment variable. For running it locally on a default Debian/Ubuntu install, this will work (transpose to init script as appropriate):

    sudo -u postgres DATA_SOURCE_NAME="user=postgres host=/var/run/postgresql/ sslmode=disable" postgres_exporter

You can also set a list of sources to scrape different instances from one exporter setup. Just define a comma-separated string:

    sudo -u postgres DATA_SOURCE_NAME="port=5432,port=6432" postgres_exporter

See the [github.com/lib/pq](http://github.com/lib/pq) module for other ways to format the connection string.

### Adding new metrics

The exporter will attempt to dynamically export additional metrics if they are added in the future, but they will be marked as "untyped". Additional metric maps can be easily created from the Postgres documentation by copying the tables and using the following Python snippet:

```python
from io import StringIO

x = """tab separated raw text of a documentation table"""
for l in StringIO(x):
    column, ctype, description = l.split('\t')
    print(""""{0}" : {{
        prometheus.CounterValue,
        prometheus.NewDesc("pg_stat_database_{0}", "{2}", nil, nil)
    }},
    """.format(column.strip(), ctype, description.strip()))
```

Adjust the value of the resultant prometheus value type appropriately. This helps build rich self-documenting metrics for the exporter.

### Adding new metrics via a config file

The `--extend.query-path` command-line argument specifies a YAML file containing additional queries to run. Some examples are provided in [queries.yaml](queries.yaml).
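For illustration, a single entry in such a file takes roughly the following shape (a sketch adapted from the bundled [queries.yaml](queries.yaml); the top-level key becomes the metric namespace, and each returned column must be declared under `metrics`):

```yaml
pg_postmaster:
  query: "SELECT pg_postmaster_start_time as start_time_seconds from pg_postmaster_start_time()"
  master: true
  metrics:
    - start_time_seconds:
        usage: "GAUGE"
        description: "Time at which postmaster started"
```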
### Disabling default metrics

To work with non-officially-supported postgres versions (e.g. 8.2.15), or variants of postgres (e.g. Greenplum), you can disable the default metrics with the `--disable-default-metrics` flag. This removes all built-in metrics, and uses only metrics defined by queries in the `queries.yaml` file you supply (so you must supply one, otherwise the exporter will return nothing but internal statuses and not your database).

### Automatically discover databases

To scrape metrics from all databases on a database server, the database DSNs can be dynamically discovered via the `--auto-discover-databases` flag. When true, `SELECT datname FROM pg_database WHERE datallowconn = true AND datistemplate = false and datname != current_database()` is run for all configured DSNs. From the result a new set of DSNs is created for which the metrics are scraped.

In addition, the option `--exclude-databases` makes it possible to filter the results of the auto-discovery and discard databases you do not need.

If you want to include only a subset of databases, you can use the `--include-databases` option. The exporter still queries the `pg_database` table, but it only scrapes a database if it is on the include list.

### Running as non-superuser

To be able to collect metrics from `pg_stat_activity` and `pg_stat_replication` as a non-superuser, you have to create functions and views as a superuser, and assign permissions separately to those. In PostgreSQL, views run with the permissions of the user that created them, so they can act as security barriers. Functions need to be created to share this data with the non-superuser. Only creating the views will leave out the most important bits of data.

```sql
-- To use IF statements, hence to be able to check if the user exists before
-- attempting creation, we need to switch to procedural SQL (PL/pgSQL)
-- instead of standard SQL.
-- More: https://www.postgresql.org/docs/9.3/plpgsql-overview.html
-- To preserve compatibility with <9.0, DO blocks are not used; instead,
-- a function is created and dropped.
CREATE OR REPLACE FUNCTION __tmp_create_user() returns void as $$
BEGIN
  IF NOT EXISTS (
          SELECT   -- SELECT list can stay empty for this
          FROM   pg_catalog.pg_user
          WHERE  usename = 'postgres_exporter') THEN
    CREATE USER postgres_exporter;
  END IF;
END;
$$ language plpgsql;

SELECT __tmp_create_user();
DROP FUNCTION __tmp_create_user();

ALTER USER postgres_exporter WITH PASSWORD 'password';
ALTER USER postgres_exporter SET SEARCH_PATH TO postgres_exporter,pg_catalog;

-- If deploying as non-superuser (for example in AWS RDS), uncomment the GRANT
-- line below and replace <MASTER_USER> with your root user.
-- GRANT postgres_exporter TO <MASTER_USER>;

CREATE SCHEMA IF NOT EXISTS postgres_exporter;
GRANT USAGE ON SCHEMA postgres_exporter TO postgres_exporter;
GRANT CONNECT ON DATABASE postgres TO postgres_exporter;

CREATE OR REPLACE FUNCTION get_pg_stat_activity() RETURNS SETOF pg_stat_activity AS
$$ SELECT * FROM pg_catalog.pg_stat_activity; $$
LANGUAGE sql
VOLATILE
SECURITY DEFINER;

CREATE OR REPLACE VIEW postgres_exporter.pg_stat_activity
AS
  SELECT * from get_pg_stat_activity();

GRANT SELECT ON postgres_exporter.pg_stat_activity TO postgres_exporter;

CREATE OR REPLACE FUNCTION get_pg_stat_replication() RETURNS SETOF pg_stat_replication AS
$$ SELECT * FROM pg_catalog.pg_stat_replication; $$
LANGUAGE sql
VOLATILE
SECURITY DEFINER;

CREATE OR REPLACE VIEW postgres_exporter.pg_stat_replication
AS
  SELECT * FROM get_pg_stat_replication();

GRANT SELECT ON postgres_exporter.pg_stat_replication TO postgres_exporter;

CREATE OR REPLACE FUNCTION get_pg_stat_statements() RETURNS SETOF pg_stat_statements AS
$$ SELECT * FROM public.pg_stat_statements; $$
LANGUAGE sql
VOLATILE
SECURITY DEFINER;

CREATE OR REPLACE VIEW postgres_exporter.pg_stat_statements
AS
  SELECT * FROM get_pg_stat_statements();

GRANT SELECT ON postgres_exporter.pg_stat_statements TO postgres_exporter;
```

> **NOTE**
> Remember to use the `postgres` database name in the connection string:
> ```
> DATA_SOURCE_NAME=postgresql://postgres_exporter:password@localhost:5432/postgres?sslmode=disable
> ```

## Running the tests
```
# Run the unit tests
make test
# Start the test database with docker
docker run -p 5432:5432 -e POSTGRES_DB=circle_test -e POSTGRES_USER=postgres -e POSTGRES_PASSWORD=test -d postgres
# Run the integration tests
DATA_SOURCE_NAME='postgresql://postgres:test@localhost:5432/circle_test?sslmode=disable' GOOPTS='-v -tags integration' make test
```
prometheus-postgres-exporter-0.10.1/SECURITY.md000066400000000000000000000002521417033021000212550ustar00rootroot00000000000000# Reporting a security issue

The Prometheus security policy, including how to report vulnerabilities, can be found here: https://prometheus.io/docs/operating/security/
prometheus-postgres-exporter-0.10.1/VERSION000066400000000000000000000000071417033021000205320ustar00rootroot000000000000000.10.1
prometheus-postgres-exporter-0.10.1/cmd/000077500000000000000000000000001417033021000202305ustar00rootroot00000000000000
prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/000077500000000000000000000000001417033021000240265ustar00rootroot00000000000000
prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/datasource.go000066400000000000000000000120171417033021000265100ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"fmt"
	"io/ioutil"
	"net/url"
	"os"
	"regexp"
	"strings"

	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"
)

func (e *Exporter) discoverDatabaseDSNs() []string {
	// connstring syntax is complex (and not sure if even regular).
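	// (A key=value connstring looks like, for example:
	// "host=/var/run/postgresql dbname=postgres sslmode=disable".)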
	// we don't need to parse it, so just superficially validate that it starts
	// with a valid-ish keyword pair
	connstringRe := regexp.MustCompile(`^ *[a-zA-Z0-9]+ *= *[^= ]+`)

	dsns := make(map[string]struct{})
	for _, dsn := range e.dsn {
		var dsnURI *url.URL
		var dsnConnstring string

		if strings.HasPrefix(dsn, "postgresql://") {
			var err error
			dsnURI, err = url.Parse(dsn)
			if err != nil {
				level.Error(logger).Log("msg", "Unable to parse DSN as URI", "dsn", loggableDSN(dsn), "err", err)
				continue
			}
		} else if connstringRe.MatchString(dsn) {
			dsnConnstring = dsn
		} else {
			level.Error(logger).Log("msg", "Unable to parse DSN as either URI or connstring", "dsn", loggableDSN(dsn))
			continue
		}

		server, err := e.servers.GetServer(dsn)
		if err != nil {
			level.Error(logger).Log("msg", "Error opening connection to database", "dsn", loggableDSN(dsn), "err", err)
			continue
		}
		dsns[dsn] = struct{}{}

		// If autoDiscoverDatabases is true, set first dsn as master database (Default: false)
		server.master = true

		databaseNames, err := queryDatabases(server)
		if err != nil {
			level.Error(logger).Log("msg", "Error querying databases", "dsn", loggableDSN(dsn), "err", err)
			continue
		}
		for _, databaseName := range databaseNames {
			if contains(e.excludeDatabases, databaseName) {
				continue
			}
			if len(e.includeDatabases) != 0 && !contains(e.includeDatabases, databaseName) {
				continue
			}
			if dsnURI != nil {
				dsnURI.Path = databaseName
				dsn = dsnURI.String()
			} else {
				// replacing one dbname with another is complicated.
				// just append new dbname to override.
				dsn = fmt.Sprintf("%s dbname=%s", dsnConnstring, databaseName)
			}
			dsns[dsn] = struct{}{}
		}
	}

	result := make([]string, len(dsns))
	index := 0
	for dsn := range dsns {
		result[index] = dsn
		index++
	}

	return result
}

func (e *Exporter) scrapeDSN(ch chan<- prometheus.Metric, dsn string) error {
	server, err := e.servers.GetServer(dsn)
	if err != nil {
		return &ErrorConnectToServer{fmt.Sprintf("Error opening connection to database (%s): %s", loggableDSN(dsn), err.Error())}
	}

	// Check if autoDiscoverDatabases is false, set dsn as master database (Default: false)
	if !e.autoDiscoverDatabases {
		server.master = true
	}

	// Check if map versions need to be updated
	if err := e.checkMapVersions(ch, server); err != nil {
		level.Warn(logger).Log("msg", "Proceeding with outdated query maps, as the Postgres version could not be determined", "err", err)
	}

	return server.Scrape(ch, e.disableSettingsMetrics)
}

// try to get the DataSource
// DATA_SOURCE_NAME always wins so we do not break older versions
// reading secrets from files wins over secrets in environment variables
// DATA_SOURCE_NAME > DATA_SOURCE_{USER|PASS}_FILE > DATA_SOURCE_{USER|PASS}
func getDataSources() ([]string, error) {
	var dsn = os.Getenv("DATA_SOURCE_NAME")
	if len(dsn) != 0 {
		return strings.Split(dsn, ","), nil
	}

	var user, pass, uri string

	dataSourceUserFile := os.Getenv("DATA_SOURCE_USER_FILE")
	if len(dataSourceUserFile) != 0 {
		fileContents, err := ioutil.ReadFile(dataSourceUserFile)
		if err != nil {
			return nil, fmt.Errorf("failed loading data source user file %s: %s", dataSourceUserFile, err.Error())
		}
		user = strings.TrimSpace(string(fileContents))
	} else {
		user = os.Getenv("DATA_SOURCE_USER")
	}

	dataSourcePassFile := os.Getenv("DATA_SOURCE_PASS_FILE")
	if len(dataSourcePassFile) != 0 {
		fileContents, err := ioutil.ReadFile(dataSourcePassFile)
		if err != nil {
			return nil, fmt.Errorf("failed loading data source pass file %s: %s", dataSourcePassFile, err.Error())
		}
		pass = strings.TrimSpace(string(fileContents))
	} else {
		pass = os.Getenv("DATA_SOURCE_PASS")
	}
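	// Build a URI-form DSN from the resolved credentials and the URI read
	// from DATA_SOURCE_URI / DATA_SOURCE_URI_FILE below.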
	ui := url.UserPassword(user, pass).String()

	dataSourceURIFile := os.Getenv("DATA_SOURCE_URI_FILE")
	if len(dataSourceURIFile) != 0 {
		fileContents, err := ioutil.ReadFile(dataSourceURIFile)
		if err != nil {
			return nil, fmt.Errorf("failed loading data source URI file %s: %s", dataSourceURIFile, err.Error())
		}
		uri = strings.TrimSpace(string(fileContents))
	} else {
		uri = os.Getenv("DATA_SOURCE_URI")
	}

	dsn = "postgresql://" + ui + "@" + uri

	return []string{dsn}, nil
}
prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/main.go000066400000000000000000000124611417033021000253050ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"net/http"
	"os"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/prometheus/common/promlog"
	"github.com/prometheus/common/promlog/flag"
	"github.com/prometheus/common/version"
	"github.com/prometheus/exporter-toolkit/web"
	webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag"
	"gopkg.in/alecthomas/kingpin.v2"
)

var (
	listenAddress          = kingpin.Flag("web.listen-address", "Address to listen on for web interface and telemetry.").Default(":9187").Envar("PG_EXPORTER_WEB_LISTEN_ADDRESS").String()
	webConfig              = webflag.AddFlags(kingpin.CommandLine)
	metricPath             = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").Envar("PG_EXPORTER_WEB_TELEMETRY_PATH").String()
	disableDefaultMetrics  = kingpin.Flag("disable-default-metrics", "Do not include default metrics.").Default("false").Envar("PG_EXPORTER_DISABLE_DEFAULT_METRICS").Bool()
	disableSettingsMetrics = kingpin.Flag("disable-settings-metrics", "Do not include pg_settings metrics.").Default("false").Envar("PG_EXPORTER_DISABLE_SETTINGS_METRICS").Bool()
	autoDiscoverDatabases  = kingpin.Flag("auto-discover-databases", "Whether to discover the databases on a server dynamically.").Default("false").Envar("PG_EXPORTER_AUTO_DISCOVER_DATABASES").Bool()
	queriesPath            = kingpin.Flag("extend.query-path", "Path to custom queries to run.").Default("").Envar("PG_EXPORTER_EXTEND_QUERY_PATH").String()
	onlyDumpMaps           = kingpin.Flag("dumpmaps", "Do not run, simply dump the maps.").Bool()
	constantLabelsList     = kingpin.Flag("constantLabels", "A list of label=value separated by comma(,).").Default("").Envar("PG_EXPORTER_CONSTANT_LABELS").String()
	excludeDatabases       = kingpin.Flag("exclude-databases", "A list of databases to remove when autoDiscoverDatabases is enabled").Default("").Envar("PG_EXPORTER_EXCLUDE_DATABASES").String()
	includeDatabases       = kingpin.Flag("include-databases", "A list of databases to include when autoDiscoverDatabases is enabled").Default("").Envar("PG_EXPORTER_INCLUDE_DATABASES").String()
	metricPrefix           = kingpin.Flag("metric-prefix", "A metric prefix can be used to have non-default (not \"pg\") prefixes for each of the metrics").Default("pg").Envar("PG_EXPORTER_METRIC_PREFIX").String()
	logger                 = log.NewNopLogger()
log.NewNopLogger() ) // Metric name parts. const ( // Namespace for all metrics. namespace = "pg" // Subsystems. exporter = "exporter" // The name of the exporter. exporterName = "postgres_exporter" // Metric label used for static string data that's handy to send to Prometheus // e.g. version staticLabelName = "static" // Metric label used for server identification. serverLabelName = "server" ) func main() { kingpin.Version(version.Print(exporterName)) promlogConfig := &promlog.Config{} flag.AddFlags(kingpin.CommandLine, promlogConfig) kingpin.HelpFlag.Short('h') kingpin.Parse() logger = promlog.New(promlogConfig) // landingPage contains the HTML served at '/'. // TODO: Make this nicer and more informative. var landingPage = []byte(`<html>
<head><title>Postgres exporter</title></head>
<body>
<h1>Postgres exporter</h1>
<p><a href='` + *metricPath + `'>Metrics</a></p>
</body>
</html>
`) if *onlyDumpMaps { dumpMaps() return } dsn, err := getDataSources() if err != nil { level.Error(logger).Log("msg", "Failed reading data sources", "err", err.Error()) os.Exit(1) } if len(dsn) == 0 { level.Error(logger).Log("msg", "Couldn't find environment variables describing the datasource to use") os.Exit(1) } opts := []ExporterOpt{ DisableDefaultMetrics(*disableDefaultMetrics), DisableSettingsMetrics(*disableSettingsMetrics), AutoDiscoverDatabases(*autoDiscoverDatabases), WithUserQueriesPath(*queriesPath), WithConstantLabels(*constantLabelsList), ExcludeDatabases(*excludeDatabases), IncludeDatabases(*includeDatabases), } exporter := NewExporter(dsn, opts...) defer func() { exporter.servers.Close() }() prometheus.MustRegister(version.NewCollector(exporterName)) prometheus.MustRegister(exporter) http.Handle(*metricPath, promhttp.Handler()) http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/html; charset=UTF-8") // nolint: errcheck w.Write(landingPage) // nolint: errcheck }) level.Info(logger).Log("msg", "Listening on address", "address", *listenAddress) srv := &http.Server{Addr: *listenAddress} if err := web.ListenAndServe(srv, *webConfig, logger); err != nil { level.Error(logger).Log("msg", "Error running HTTP server", "err", err) os.Exit(1) } } prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/namespace.go000066400000000000000000000211151417033021000263110ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "database/sql" "errors" "fmt" "time" "github.com/blang/semver" "github.com/go-kit/log/level" "github.com/lib/pq" "github.com/prometheus/client_golang/prometheus" ) // Query within a namespace mapping and emit metrics. Returns fatal errors if // the scrape fails, and a slice of errors if they were non-fatal. func queryNamespaceMapping(server *Server, namespace string, mapping MetricMapNamespace) ([]prometheus.Metric, []error, error) { // Check for a query override for this namespace query, found := server.queryOverrides[namespace] // Was this query disabled (i.e. nothing sensible can be queried on this // version of PostgreSQL)? if query == "" && found { // Return success (no pertinent data) return []prometheus.Metric{}, []error{}, nil } // Don't fail on a bad scrape of one metric var rows *sql.Rows var err error if !found { // I've no idea how to avoid this properly at the moment, but this is // an admin tool so you're not injecting SQL right? 
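// To make this concrete: for a built-in namespace with no query override, the
// statement below expands to e.g. "SELECT * FROM pg_stat_bgwriter;" (an
// illustrative example, assuming that namespace has no override configured).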
rows, err = server.db.Query(fmt.Sprintf("SELECT * FROM %s;", namespace)) // nolint: gas } else { rows, err = server.db.Query(query) } if err != nil { return []prometheus.Metric{}, []error{}, fmt.Errorf("Error running query on database %q: %s %v", server, namespace, err) } defer rows.Close() // nolint: errcheck var columnNames []string columnNames, err = rows.Columns() if err != nil { return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving column list for: ", namespace, err)) } // Make a lookup map for the column indices var columnIdx = make(map[string]int, len(columnNames)) for i, n := range columnNames { columnIdx[n] = i } var columnData = make([]interface{}, len(columnNames)) var scanArgs = make([]interface{}, len(columnNames)) for i := range columnData { scanArgs[i] = &columnData[i] } nonfatalErrors := []error{} metrics := make([]prometheus.Metric, 0) for rows.Next() { err = rows.Scan(scanArgs...) if err != nil { return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err)) } // Get the label values for this row. labels := make([]string, len(mapping.labels)) for idx, label := range mapping.labels { labels[idx], _ = dbToString(columnData[columnIdx[label]]) } // Loop over column names, and match to scan data. Unknown columns // will be filled with an untyped metric number *if* they can be // converted to float64s. NULLs are allowed and treated as NaN. for idx, columnName := range columnNames { var metric prometheus.Metric if metricMapping, ok := mapping.columnMappings[columnName]; ok { // Is this a metricy metric? if metricMapping.discard { continue } if metricMapping.histogram { var keys []float64 err = pq.Array(&keys).Scan(columnData[idx]) if err != nil { return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving", columnName, "buckets:", namespace, err)) } var values []int64 valuesIdx, ok := columnIdx[columnName+"_bucket"] if !ok { nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Missing column: ", namespace, columnName+"_bucket"))) continue } err = pq.Array(&values).Scan(columnData[valuesIdx]) if err != nil { return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving", columnName, "bucket values:", namespace, err)) } buckets := make(map[float64]uint64, len(keys)) for i, key := range keys { if i >= len(values) { break } buckets[key] = uint64(values[i]) } idx, ok = columnIdx[columnName+"_sum"] if !ok { nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Missing column: ", namespace, columnName+"_sum"))) continue } sum, ok := dbToFloat64(columnData[idx]) if !ok { nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName+"_sum", columnData[idx]))) continue } idx, ok = columnIdx[columnName+"_count"] if !ok { nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Missing column: ", namespace, columnName+"_count"))) continue } count, ok := dbToUint64(columnData[idx]) if !ok { nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName+"_count", columnData[idx]))) continue } metric = prometheus.MustNewConstHistogram( metricMapping.desc, count, sum, buckets, labels..., ) } else { value, ok := dbToFloat64(columnData[idx]) if !ok { nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName, columnData[idx]))) continue } // Generate the metric metric 
= prometheus.MustNewConstMetric(metricMapping.desc, metricMapping.vtype, value, labels...) } } else { // Unknown metric. Report as untyped if scan to float64 works, else note an error too. metricLabel := fmt.Sprintf("%s_%s", namespace, columnName) desc := prometheus.NewDesc(metricLabel, fmt.Sprintf("Unknown metric from %s", namespace), mapping.labels, server.labels) // Its not an error to fail here, since the values are // unexpected anyway. value, ok := dbToFloat64(columnData[idx]) if !ok { nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unparseable column type - discarding: ", namespace, columnName, err))) continue } metric = prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, value, labels...) } metrics = append(metrics, metric) } } return metrics, nonfatalErrors, nil } // Iterate through all the namespace mappings in the exporter and run their // queries. func queryNamespaceMappings(ch chan<- prometheus.Metric, server *Server) map[string]error { // Return a map of namespace -> errors namespaceErrors := make(map[string]error) scrapeStart := time.Now() for namespace, mapping := range server.metricMap { level.Debug(logger).Log("msg", "Querying namespace", "namespace", namespace) if mapping.master && !server.master { level.Debug(logger).Log("msg", "Query skipped...") continue } // check if the query is to be run on specific database server version range or not if len(server.runonserver) > 0 { serVersion, _ := semver.Parse(server.lastMapVersion.String()) runServerRange, _ := semver.ParseRange(server.runonserver) if !runServerRange(serVersion) { level.Debug(logger).Log("msg", "Query skipped for this database version", "version", server.lastMapVersion.String(), "target_version", server.runonserver) continue } } scrapeMetric := false // Check if the metric is cached server.cacheMtx.Lock() cachedMetric, found := server.metricCache[namespace] server.cacheMtx.Unlock() // If found, check if needs refresh from cache if found { if scrapeStart.Sub(cachedMetric.lastScrape).Seconds() > float64(mapping.cacheSeconds) { scrapeMetric = true } } else { scrapeMetric = true } var metrics []prometheus.Metric var nonFatalErrors []error var err error if scrapeMetric { metrics, nonFatalErrors, err = queryNamespaceMapping(server, namespace, mapping) } else { metrics = cachedMetric.metrics } // Serious error - a namespace disappeared if err != nil { namespaceErrors[namespace] = err level.Info(logger).Log("err", err) } // Non-serious errors - likely version or parsing problems. if len(nonFatalErrors) > 0 { for _, err := range nonFatalErrors { level.Info(logger).Log("err", err) } } // Emit the metrics into the channel for _, metric := range metrics { ch <- metric } if scrapeMetric { // Only cache if metric is meaningfully cacheable if mapping.cacheSeconds > 0 { server.cacheMtx.Lock() server.metricCache[namespace] = cachedMetrics{ metrics: metrics, lastScrape: scrapeStart, } server.cacheMtx.Unlock() } } } return namespaceErrors } prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/pg_setting.go000066400000000000000000000100171417033021000265170ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "fmt" "math" "strconv" "strings" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) // Query the pg_settings view containing runtime variables func querySettings(ch chan<- prometheus.Metric, server *Server) error { level.Debug(logger).Log("msg", "Querying pg_setting view", "server", server) // pg_settings docs: https://www.postgresql.org/docs/current/static/view-pg-settings.html // // NOTE: If you add more vartypes here, you must update the supported // types in normaliseUnit() below query := "SELECT name, setting, COALESCE(unit, ''), short_desc, vartype FROM pg_settings WHERE vartype IN ('bool', 'integer', 'real');" rows, err := server.db.Query(query) if err != nil { return fmt.Errorf("Error running query on database %q: %s %v", server, namespace, err) } defer rows.Close() // nolint: errcheck for rows.Next() { s := &pgSetting{} err = rows.Scan(&s.name, &s.setting, &s.unit, &s.shortDesc, &s.vartype) if err != nil { return fmt.Errorf("Error retrieving rows on %q: %s %v", server, namespace, err) } ch <- s.metric(server.labels) } return nil } // pgSetting represents a PostgreSQL runtime variable as returned by the // pg_settings view. type pgSetting struct { name, setting, unit, shortDesc, vartype string } func (s *pgSetting) metric(labels prometheus.Labels) prometheus.Metric { var ( err error name = strings.Replace(s.name, ".", "_", -1) unit = s.unit // nolint: ineffassign shortDesc = s.shortDesc subsystem = "settings" val float64 ) switch s.vartype { case "bool": if s.setting == "on" { val = 1 } case "integer", "real": if val, unit, err = s.normaliseUnit(); err != nil { // Panic, since we should recognise all units // and don't want to silently exclude metrics panic(err) } if len(unit) > 0 { name = fmt.Sprintf("%s_%s", name, unit) shortDesc = fmt.Sprintf("%s [Units converted to %s.]", shortDesc, unit) } default: // Panic because we got a type we didn't ask for panic(fmt.Sprintf("Unsupported vartype %q", s.vartype)) } desc := newDesc(subsystem, name, shortDesc, labels) return prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, val) } // TODO: fix linter override // nolint: nakedret func (s *pgSetting) normaliseUnit() (val float64, unit string, err error) { val, err = strconv.ParseFloat(s.setting, 64) if err != nil { return val, unit, fmt.Errorf("Error converting setting %q value %q to float: %s", s.name, s.setting, err) } // Units defined in: https://www.postgresql.org/docs/current/static/config-setting.html switch s.unit { case "": return case "ms", "s", "min", "h", "d": unit = "seconds" case "B", "kB", "MB", "GB", "TB", "8kB", "16kB", "32kB", "16MB", "32MB", "64MB": unit = "bytes" default: err = fmt.Errorf("Unknown unit for runtime variable: %q", s.unit) return } // -1 is special, don't modify the value if val == -1 { return } switch s.unit { case "ms": val /= 1000 case "min": val *= 60 case "h": val *= 60 * 60 case "d": val *= 60 * 60 * 24 case "kB": val *= math.Pow(2, 10) case "MB": val *= math.Pow(2, 20) case "GB": val *= math.Pow(2, 30) case "TB": val *= math.Pow(2, 40) case "8kB": val *= math.Pow(2, 13) case "16kB": 
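// e.g. a pg_settings value of 3 with unit "16kB" normalises to 3 * 2^14 = 49152 bytes,
// matching the 16_kb_real_fixture_metric fixture in pg_setting_test.go below.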
val *= math.Pow(2, 14) case "32kB": val *= math.Pow(2, 15) case "16MB": val *= math.Pow(2, 24) case "32MB": val *= math.Pow(2, 25) case "64MB": val *= math.Pow(2, 26) } return } prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/pg_setting_test.go000066400000000000000000000145171417033021000275670ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !integration // +build !integration package main import ( "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" . "gopkg.in/check.v1" ) type PgSettingSuite struct{} var _ = Suite(&PgSettingSuite{}) var fixtures = []fixture{ { p: pgSetting{ name: "seconds_fixture_metric", setting: "5", unit: "s", shortDesc: "Foo foo foo", vartype: "integer", }, n: normalised{ val: 5, unit: "seconds", err: "", }, d: `Desc{fqName: "pg_settings_seconds_fixture_metric_seconds", help: "Foo foo foo [Units converted to seconds.]", constLabels: {}, variableLabels: []}`, v: 5, }, { p: pgSetting{ name: "milliseconds_fixture_metric", setting: "5000", unit: "ms", shortDesc: "Foo foo foo", vartype: "integer", }, n: normalised{ val: 5, unit: "seconds", err: "", }, d: `Desc{fqName: "pg_settings_milliseconds_fixture_metric_seconds", help: "Foo foo foo [Units converted to seconds.]", constLabels: {}, variableLabels: []}`, v: 5, }, { p: pgSetting{ name: "eight_kb_fixture_metric", setting: "17", unit: "8kB", shortDesc: "Foo foo foo", vartype: "integer", }, n: normalised{ val: 139264, unit: "bytes", err: "", }, d: `Desc{fqName: "pg_settings_eight_kb_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`, v: 139264, }, { p: pgSetting{ name: "16_kb_real_fixture_metric", setting: "3.0", unit: "16kB", shortDesc: "Foo foo foo", vartype: "real", }, n: normalised{ val: 49152, unit: "bytes", err: "", }, d: `Desc{fqName: "pg_settings_16_kb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`, v: 49152, }, { p: pgSetting{ name: "16_mb_real_fixture_metric", setting: "3.0", unit: "16MB", shortDesc: "Foo foo foo", vartype: "real", }, n: normalised{ val: 5.0331648e+07, unit: "bytes", err: "", }, d: `Desc{fqName: "pg_settings_16_mb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`, v: 5.0331648e+07, }, { p: pgSetting{ name: "32_mb_real_fixture_metric", setting: "3.0", unit: "32MB", shortDesc: "Foo foo foo", vartype: "real", }, n: normalised{ val: 1.00663296e+08, unit: "bytes", err: "", }, d: `Desc{fqName: "pg_settings_32_mb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`, v: 1.00663296e+08, }, { p: pgSetting{ name: "64_mb_real_fixture_metric", setting: "3.0", unit: "64MB", shortDesc: "Foo foo foo", vartype: "real", }, n: normalised{ val: 2.01326592e+08, unit: "bytes", err: "", }, d: `Desc{fqName: 
"pg_settings_64_mb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`, v: 2.01326592e+08, }, { p: pgSetting{ name: "bool_on_fixture_metric", setting: "on", unit: "", shortDesc: "Foo foo foo", vartype: "bool", }, n: normalised{ val: 1, unit: "", err: "", }, d: `Desc{fqName: "pg_settings_bool_on_fixture_metric", help: "Foo foo foo", constLabels: {}, variableLabels: []}`, v: 1, }, { p: pgSetting{ name: "bool_off_fixture_metric", setting: "off", unit: "", shortDesc: "Foo foo foo", vartype: "bool", }, n: normalised{ val: 0, unit: "", err: "", }, d: `Desc{fqName: "pg_settings_bool_off_fixture_metric", help: "Foo foo foo", constLabels: {}, variableLabels: []}`, v: 0, }, { p: pgSetting{ name: "special_minus_one_value", setting: "-1", unit: "d", shortDesc: "foo foo foo", vartype: "integer", }, n: normalised{ val: -1, unit: "seconds", err: "", }, d: `Desc{fqName: "pg_settings_special_minus_one_value_seconds", help: "foo foo foo [Units converted to seconds.]", constLabels: {}, variableLabels: []}`, v: -1, }, { p: pgSetting{ name: "rds.rds_superuser_reserved_connections", setting: "2", unit: "", shortDesc: "Sets the number of connection slots reserved for rds_superusers.", vartype: "integer", }, n: normalised{ val: 2, unit: "", err: "", }, d: `Desc{fqName: "pg_settings_rds_rds_superuser_reserved_connections", help: "Sets the number of connection slots reserved for rds_superusers.", constLabels: {}, variableLabels: []}`, v: 2, }, { p: pgSetting{ name: "unknown_unit", setting: "10", unit: "nonexistent", shortDesc: "foo foo foo", vartype: "integer", }, n: normalised{ val: 10, unit: "", err: `Unknown unit for runtime variable: "nonexistent"`, }, }, } func (s *PgSettingSuite) TestNormaliseUnit(c *C) { for _, f := range fixtures { switch f.p.vartype { case "integer", "real": val, unit, err := f.p.normaliseUnit() c.Check(val, Equals, f.n.val) c.Check(unit, Equals, f.n.unit) if err == nil { c.Check("", Equals, f.n.err) } else { c.Check(err.Error(), Equals, f.n.err) } } } } func (s *PgSettingSuite) TestMetric(c *C) { defer func() { if r := recover(); r != nil { if r.(error).Error() != `Unknown unit for runtime variable: "nonexistent"` { panic(r) } } }() for _, f := range fixtures { d := &dto.Metric{} m := f.p.metric(prometheus.Labels{}) m.Write(d) // nolint: errcheck c.Check(m.Desc().String(), Equals, f.d) c.Check(d.GetGauge().GetValue(), Equals, f.v) } } type normalised struct { val float64 unit string err string } type fixture struct { p pgSetting n normalised d string v float64 } prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/postgres_exporter.go000066400000000000000000001003341417033021000301540ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package main import ( "crypto/sha256" "database/sql" "errors" "fmt" "io/ioutil" "math" "regexp" "strings" "time" "github.com/blang/semver" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) // ColumnUsage should be one of several enum values which describe how a // queried row is to be converted to a Prometheus metric. type ColumnUsage int const ( // DISCARD ignores a column DISCARD ColumnUsage = iota // LABEL identifies a column as a label LABEL ColumnUsage = iota // COUNTER identifies a column as a counter COUNTER ColumnUsage = iota // GAUGE identifies a column as a gauge GAUGE ColumnUsage = iota // MAPPEDMETRIC identifies a column as a mapping of text values MAPPEDMETRIC ColumnUsage = iota // DURATION identifies a column as a text duration (and converted to milliseconds) DURATION ColumnUsage = iota // HISTOGRAM identifies a column as a histogram HISTOGRAM ColumnUsage = iota ) // UnmarshalYAML implements the yaml.Unmarshaller interface. func (cu *ColumnUsage) UnmarshalYAML(unmarshal func(interface{}) error) error { var value string if err := unmarshal(&value); err != nil { return err } columnUsage, err := stringToColumnUsage(value) if err != nil { return err } *cu = columnUsage return nil } // MappingOptions is a copy of ColumnMapping used only for parsing type MappingOptions struct { Usage string `yaml:"usage"` Description string `yaml:"description"` Mapping map[string]float64 `yaml:"metric_mapping"` // Optional column mapping for MAPPEDMETRIC SupportedVersions semver.Range `yaml:"pg_version"` // Semantic version ranges which are supported. Unsupported columns are not queried (internally converted to DISCARD). } // Mapping represents a set of MappingOptions type Mapping map[string]MappingOptions // Regex used to get the "short-version" from the postgres version field. var versionRegex = regexp.MustCompile(`^\w+ ((\d+)(\.\d+)?(\.\d+)?)`) var lowestSupportedVersion = semver.MustParse("9.1.0") // Parses the version of postgres into the short version string we can use to // match behaviors. func parseVersion(versionString string) (semver.Version, error) { submatches := versionRegex.FindStringSubmatch(versionString) if len(submatches) > 1 { return semver.ParseTolerant(submatches[1]) } return semver.Version{}, errors.New(fmt.Sprintln("Could not find a postgres version in string:", versionString)) } // ColumnMapping is the user-friendly representation of a prometheus descriptor map type ColumnMapping struct { usage ColumnUsage `yaml:"usage"` description string `yaml:"description"` mapping map[string]float64 `yaml:"metric_mapping"` // Optional column mapping for MAPPEDMETRIC supportedVersions semver.Range `yaml:"pg_version"` // Semantic version ranges which are supported. Unsupported columns are not queried (internally converted to DISCARD). } // UnmarshalYAML implements yaml.Unmarshaller func (cm *ColumnMapping) UnmarshalYAML(unmarshal func(interface{}) error) error { type plain ColumnMapping return unmarshal((*plain)(cm)) } // intermediateMetricMap holds the partially loaded metric map parsing. // This is mainly so we can parse cacheSeconds around. type intermediateMetricMap struct { columnMappings map[string]ColumnMapping master bool cacheSeconds uint64 } // MetricMapNamespace groups metric maps under a shared set of labels. 
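// For example, every metric generated from the "pg_stat_database" namespace defined
// below carries the "datid" and "datname" labels, because those columns are mapped
// with usage LABEL, while each remaining column becomes one metric in that namespace.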
type MetricMapNamespace struct { labels []string // Label names for this namespace columnMappings map[string]MetricMap // Column mappings in this namespace master bool // Call query only for master database cacheSeconds uint64 // Number of seconds this metric namespace can be cached. 0 disables. } // MetricMap stores the prometheus metric description which a given column will // be mapped to by the collector type MetricMap struct { discard bool // Should metric be discarded during mapping? histogram bool // Should metric be treated as a histogram? vtype prometheus.ValueType // Prometheus valuetype desc *prometheus.Desc // Prometheus descriptor conversion func(interface{}) (float64, bool) // Conversion function to turn PG result into float64 } // ErrorConnectToServer is a connection to PgSQL server error type ErrorConnectToServer struct { Msg string } // Error returns error func (e *ErrorConnectToServer) Error() string { return e.Msg } // TODO: revisit this with the semver system func dumpMaps() { // TODO: make this function part of the exporter for name, cmap := range builtinMetricMaps { query, ok := queryOverrides[name] if !ok { fmt.Println(name) } else { for _, queryOverride := range query { fmt.Println(name, queryOverride.versionRange, queryOverride.query) } } for column, details := range cmap.columnMappings { fmt.Printf(" %-40s %v\n", column, details) } fmt.Println() } } var builtinMetricMaps = map[string]intermediateMetricMap{ "pg_stat_bgwriter": { map[string]ColumnMapping{ "checkpoints_timed": {COUNTER, "Number of scheduled checkpoints that have been performed", nil, nil}, "checkpoints_req": {COUNTER, "Number of requested checkpoints that have been performed", nil, nil}, "checkpoint_write_time": {COUNTER, "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds", nil, nil}, "checkpoint_sync_time": {COUNTER, "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds", nil, nil}, "buffers_checkpoint": {COUNTER, "Number of buffers written during checkpoints", nil, nil}, "buffers_clean": {COUNTER, "Number of buffers written by the background writer", nil, nil}, "maxwritten_clean": {COUNTER, "Number of times the background writer stopped a cleaning scan because it had written too many buffers", nil, nil}, "buffers_backend": {COUNTER, "Number of buffers written directly by a backend", nil, nil}, "buffers_backend_fsync": {COUNTER, "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)", nil, nil}, "buffers_alloc": {COUNTER, "Number of buffers allocated", nil, nil}, "stats_reset": {COUNTER, "Time at which these statistics were last reset", nil, nil}, }, true, 0, }, "pg_stat_database": { map[string]ColumnMapping{ "datid": {LABEL, "OID of a database", nil, nil}, "datname": {LABEL, "Name of this database", nil, nil}, "numbackends": {GAUGE, "Number of backends currently connected to this database. 
This is the only column in this view that returns a value reflecting current state; all other columns return the accumulated values since the last reset.", nil, nil}, "xact_commit": {COUNTER, "Number of transactions in this database that have been committed", nil, nil}, "xact_rollback": {COUNTER, "Number of transactions in this database that have been rolled back", nil, nil}, "blks_read": {COUNTER, "Number of disk blocks read in this database", nil, nil}, "blks_hit": {COUNTER, "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)", nil, nil}, "tup_returned": {COUNTER, "Number of rows returned by queries in this database", nil, nil}, "tup_fetched": {COUNTER, "Number of rows fetched by queries in this database", nil, nil}, "tup_inserted": {COUNTER, "Number of rows inserted by queries in this database", nil, nil}, "tup_updated": {COUNTER, "Number of rows updated by queries in this database", nil, nil}, "tup_deleted": {COUNTER, "Number of rows deleted by queries in this database", nil, nil}, "conflicts": {COUNTER, "Number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see pg_stat_database_conflicts for details.)", nil, nil}, "temp_files": {COUNTER, "Number of temporary files created by queries in this database. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the log_temp_files setting.", nil, nil}, "temp_bytes": {COUNTER, "Total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting.", nil, nil}, "deadlocks": {COUNTER, "Number of deadlocks detected in this database", nil, nil}, "blk_read_time": {COUNTER, "Time spent reading data file blocks by backends in this database, in milliseconds", nil, nil}, "blk_write_time": {COUNTER, "Time spent writing data file blocks by backends in this database, in milliseconds", nil, nil}, "stats_reset": {COUNTER, "Time at which these statistics were last reset", nil, nil}, }, true, 0, }, "pg_stat_database_conflicts": { map[string]ColumnMapping{ "datid": {LABEL, "OID of a database", nil, nil}, "datname": {LABEL, "Name of this database", nil, nil}, "confl_tablespace": {COUNTER, "Number of queries in this database that have been canceled due to dropped tablespaces", nil, nil}, "confl_lock": {COUNTER, "Number of queries in this database that have been canceled due to lock timeouts", nil, nil}, "confl_snapshot": {COUNTER, "Number of queries in this database that have been canceled due to old snapshots", nil, nil}, "confl_bufferpin": {COUNTER, "Number of queries in this database that have been canceled due to pinned buffers", nil, nil}, "confl_deadlock": {COUNTER, "Number of queries in this database that have been canceled due to deadlocks", nil, nil}, }, true, 0, }, "pg_locks": { map[string]ColumnMapping{ "datname": {LABEL, "Name of this database", nil, nil}, "mode": {LABEL, "Type of Lock", nil, nil}, "count": {GAUGE, "Number of locks", nil, nil}, }, true, 0, }, "pg_stat_replication": { map[string]ColumnMapping{ "procpid": {DISCARD, "Process ID of a WAL sender process", nil, semver.MustParseRange("<9.2.0")}, "pid": {DISCARD, "Process ID of a WAL sender process", nil, semver.MustParseRange(">=9.2.0")}, "usesysid": {DISCARD, "OID of the 
user logged into this WAL sender process", nil, nil}, "usename": {DISCARD, "Name of the user logged into this WAL sender process", nil, nil}, "application_name": {LABEL, "Name of the application that is connected to this WAL sender", nil, nil}, "client_addr": {LABEL, "IP address of the client connected to this WAL sender. If this field is null, it indicates that the client is connected via a Unix socket on the server machine.", nil, nil}, "client_hostname": {DISCARD, "Host name of the connected client, as reported by a reverse DNS lookup of client_addr. This field will only be non-null for IP connections, and only when log_hostname is enabled.", nil, nil}, "client_port": {DISCARD, "TCP port number that the client is using for communication with this WAL sender, or -1 if a Unix socket is used", nil, nil}, "backend_start": {DISCARD, "with time zone Time when this process was started, i.e., when the client connected to this WAL sender", nil, nil}, "backend_xmin": {DISCARD, "The current backend's xmin horizon.", nil, nil}, "state": {LABEL, "Current WAL sender state", nil, nil}, "sent_location": {DISCARD, "Last transaction log position sent on this connection", nil, semver.MustParseRange("<10.0.0")}, "write_location": {DISCARD, "Last transaction log position written to disk by this standby server", nil, semver.MustParseRange("<10.0.0")}, "flush_location": {DISCARD, "Last transaction log position flushed to disk by this standby server", nil, semver.MustParseRange("<10.0.0")}, "replay_location": {DISCARD, "Last transaction log position replayed into the database on this standby server", nil, semver.MustParseRange("<10.0.0")}, "sent_lsn": {DISCARD, "Last transaction log position sent on this connection", nil, semver.MustParseRange(">=10.0.0")}, "write_lsn": {DISCARD, "Last transaction log position written to disk by this standby server", nil, semver.MustParseRange(">=10.0.0")}, "flush_lsn": {DISCARD, "Last transaction log position flushed to disk by this standby server", nil, semver.MustParseRange(">=10.0.0")}, "replay_lsn": {DISCARD, "Last transaction log position replayed into the database on this standby server", nil, semver.MustParseRange(">=10.0.0")}, "sync_priority": {DISCARD, "Priority of this standby server for being chosen as the synchronous standby", nil, nil}, "sync_state": {DISCARD, "Synchronous state of this standby server", nil, nil}, "slot_name": {LABEL, "A unique, cluster-wide identifier for the replication slot", nil, semver.MustParseRange(">=9.2.0")}, "plugin": {DISCARD, "The base name of the shared object containing the output plugin this logical slot is using, or null for physical slots", nil, nil}, "slot_type": {DISCARD, "The slot type - physical or logical", nil, nil}, "datoid": {DISCARD, "The OID of the database this slot is associated with, or null. Only logical slots have an associated database", nil, nil}, "database": {DISCARD, "The name of the database this slot is associated with, or null. Only logical slots have an associated database", nil, nil}, "active": {DISCARD, "True if this slot is currently actively being used", nil, nil}, "active_pid": {DISCARD, "Process ID of a WAL sender process", nil, nil}, "xmin": {DISCARD, "The oldest transaction that this slot needs the database to retain. VACUUM cannot remove tuples deleted by any later transaction", nil, nil}, "catalog_xmin": {DISCARD, "The oldest transaction affecting the system catalogs that this slot needs the database to retain. 
VACUUM cannot remove catalog tuples deleted by any later transaction", nil, nil}, "restart_lsn": {DISCARD, "The address (LSN) of oldest WAL which still might be required by the consumer of this slot and thus won't be automatically removed during checkpoints", nil, nil}, "pg_current_xlog_location": {DISCARD, "pg_current_xlog_location", nil, nil}, "pg_current_wal_lsn": {DISCARD, "pg_current_xlog_location", nil, semver.MustParseRange(">=10.0.0")}, "pg_current_wal_lsn_bytes": {GAUGE, "WAL position in bytes", nil, semver.MustParseRange(">=10.0.0")}, "pg_xlog_location_diff": {GAUGE, "Lag in bytes between master and slave", nil, semver.MustParseRange(">=9.2.0 <10.0.0")}, "pg_wal_lsn_diff": {GAUGE, "Lag in bytes between master and slave", nil, semver.MustParseRange(">=10.0.0")}, "confirmed_flush_lsn": {DISCARD, "LSN position a consumer of a slot has confirmed flushing the data received", nil, nil}, "write_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it (but not yet flushed it or applied it). This can be used to gauge the delay that synchronous_commit level remote_write incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")}, "flush_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it (but not yet applied it). This can be used to gauge the delay that synchronous_commit level remote_flush incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")}, "replay_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it. 
This can be used to gauge the delay that synchronous_commit level remote_apply incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")}, }, true, 0, }, "pg_replication_slots": { map[string]ColumnMapping{ "slot_name": {LABEL, "Name of the replication slot", nil, nil}, "database": {LABEL, "Name of the database", nil, nil}, "active": {GAUGE, "Flag indicating if the slot is active", nil, nil}, "pg_wal_lsn_diff": {GAUGE, "Replication lag in bytes", nil, nil}, }, true, 0, }, "pg_stat_archiver": { map[string]ColumnMapping{ "archived_count": {COUNTER, "Number of WAL files that have been successfully archived", nil, nil}, "last_archived_wal": {DISCARD, "Name of the last WAL file successfully archived", nil, nil}, "last_archived_time": {DISCARD, "Time of the last successful archive operation", nil, nil}, "failed_count": {COUNTER, "Number of failed attempts for archiving WAL files", nil, nil}, "last_failed_wal": {DISCARD, "Name of the WAL file of the last failed archival operation", nil, nil}, "last_failed_time": {DISCARD, "Time of the last failed archival operation", nil, nil}, "stats_reset": {DISCARD, "Time at which these statistics were last reset", nil, nil}, "last_archive_age": {GAUGE, "Time in seconds since last WAL segment was successfully archived", nil, nil}, }, true, 0, }, "pg_stat_activity": { map[string]ColumnMapping{ "datname": {LABEL, "Name of this database", nil, nil}, "state": {LABEL, "connection state", nil, semver.MustParseRange(">=9.2.0")}, "count": {GAUGE, "number of connections in this state", nil, nil}, "max_tx_duration": {GAUGE, "max duration in seconds any active transaction has been running", nil, nil}, }, true, 0, }, } // Turn the MetricMap column mapping into a prometheus descriptor mapping. func makeDescMap(pgVersion semver.Version, serverLabels prometheus.Labels, metricMaps map[string]intermediateMetricMap) map[string]MetricMapNamespace { var metricMap = make(map[string]MetricMapNamespace) for namespace, intermediateMappings := range metricMaps { thisMap := make(map[string]MetricMap) namespace = strings.Replace(namespace, "pg", *metricPrefix, 1) // Get the constant labels var variableLabels []string for columnName, columnMapping := range intermediateMappings.columnMappings { if columnMapping.usage == LABEL { variableLabels = append(variableLabels, columnName) } } for columnName, columnMapping := range intermediateMappings.columnMappings { // Check column version compatibility for the current map // Force to discard if not compatible. if columnMapping.supportedVersions != nil { if !columnMapping.supportedVersions(pgVersion) { // It's very useful to be able to see what columns are being // rejected. level.Debug(logger).Log("msg", "Column is being forced to discard due to version incompatibility", "column", columnName) thisMap[columnName] = MetricMap{ discard: true, conversion: func(_ interface{}) (float64, bool) { return math.NaN(), true }, } continue } } // Determine how to convert the column based on its usage. 
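// In short: DISCARD and LABEL columns emit no metric of their own; COUNTER and
// GAUGE map directly onto the corresponding Prometheus value types; HISTOGRAM
// additionally expects companion <column>_bucket, <column>_sum and <column>_count
// columns; MAPPEDMETRIC translates text values through the user-supplied mapping;
// and DURATION parses duration strings such as "1h5m" into milliseconds.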
// nolint: dupl switch columnMapping.usage { case DISCARD, LABEL: thisMap[columnName] = MetricMap{ discard: true, conversion: func(_ interface{}) (float64, bool) { return math.NaN(), true }, } case COUNTER: thisMap[columnName] = MetricMap{ vtype: prometheus.CounterValue, desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels), conversion: func(in interface{}) (float64, bool) { return dbToFloat64(in) }, } case GAUGE: thisMap[columnName] = MetricMap{ vtype: prometheus.GaugeValue, desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels), conversion: func(in interface{}) (float64, bool) { return dbToFloat64(in) }, } case HISTOGRAM: thisMap[columnName] = MetricMap{ histogram: true, vtype: prometheus.UntypedValue, desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels), conversion: func(in interface{}) (float64, bool) { return dbToFloat64(in) }, } thisMap[columnName+"_bucket"] = MetricMap{ histogram: true, discard: true, } thisMap[columnName+"_sum"] = MetricMap{ histogram: true, discard: true, } thisMap[columnName+"_count"] = MetricMap{ histogram: true, discard: true, } case MAPPEDMETRIC: thisMap[columnName] = MetricMap{ vtype: prometheus.GaugeValue, desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels), conversion: func(in interface{}) (float64, bool) { text, ok := in.(string) if !ok { return math.NaN(), false } val, ok := columnMapping.mapping[text] if !ok { return math.NaN(), false } return val, true }, } case DURATION: thisMap[columnName] = MetricMap{ vtype: prometheus.GaugeValue, desc: prometheus.NewDesc(fmt.Sprintf("%s_%s_milliseconds", namespace, columnName), columnMapping.description, variableLabels, serverLabels), conversion: func(in interface{}) (float64, bool) { var durationString string switch t := in.(type) { case []byte: durationString = string(t) case string: durationString = t default: level.Error(logger).Log("msg", "Duration conversion metric was not a string") return math.NaN(), false } if durationString == "-1" { return math.NaN(), false } d, err := time.ParseDuration(durationString) if err != nil { level.Error(logger).Log("msg", "Failed converting result to metric", "column", columnName, "in", in, "err", err) return math.NaN(), false } return float64(d / time.Millisecond), true }, } } } metricMap[namespace] = MetricMapNamespace{variableLabels, thisMap, intermediateMappings.master, intermediateMappings.cacheSeconds} } return metricMap } type cachedMetrics struct { metrics []prometheus.Metric lastScrape time.Time } // Exporter collects Postgres metrics. It implements prometheus.Collector. type Exporter struct { // Holds a reference to the build in column mappings. Currently this is for testing purposes // only, since it just points to the global. builtinMetricMaps map[string]intermediateMetricMap disableDefaultMetrics, disableSettingsMetrics, autoDiscoverDatabases bool excludeDatabases []string includeDatabases []string dsn []string userQueriesPath string constantLabels prometheus.Labels duration prometheus.Gauge error prometheus.Gauge psqlUp prometheus.Gauge userQueriesError *prometheus.GaugeVec totalScrapes prometheus.Counter // servers are used to allow re-using the DB connection between scrapes. // servers contains metrics map and query overrides. 
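// (Each DSN resolves to one cached Server, so the underlying connection is
// reused across scrapes rather than reopened every time.)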
servers *Servers } // ExporterOpt configures Exporter. type ExporterOpt func(*Exporter) // DisableDefaultMetrics configures default metrics export. func DisableDefaultMetrics(b bool) ExporterOpt { return func(e *Exporter) { e.disableDefaultMetrics = b } } // DisableSettingsMetrics configures pg_settings export. func DisableSettingsMetrics(b bool) ExporterOpt { return func(e *Exporter) { e.disableSettingsMetrics = b } } // AutoDiscoverDatabases allows scraping all databases on a database server. func AutoDiscoverDatabases(b bool) ExporterOpt { return func(e *Exporter) { e.autoDiscoverDatabases = b } } // ExcludeDatabases allows to filter out result from AutoDiscoverDatabases func ExcludeDatabases(s string) ExporterOpt { return func(e *Exporter) { e.excludeDatabases = strings.Split(s, ",") } } // IncludeDatabases allows to filter result from AutoDiscoverDatabases func IncludeDatabases(s string) ExporterOpt { return func(e *Exporter) { if len(s) > 0 { e.includeDatabases = strings.Split(s, ",") } } } // WithUserQueriesPath configures user's queries path. func WithUserQueriesPath(p string) ExporterOpt { return func(e *Exporter) { e.userQueriesPath = p } } // WithConstantLabels configures constant labels. func WithConstantLabels(s string) ExporterOpt { return func(e *Exporter) { e.constantLabels = parseConstLabels(s) } } func parseConstLabels(s string) prometheus.Labels { labels := make(prometheus.Labels) s = strings.TrimSpace(s) if len(s) == 0 { return labels } parts := strings.Split(s, ",") for _, p := range parts { keyValue := strings.Split(strings.TrimSpace(p), "=") if len(keyValue) != 2 { level.Error(logger).Log(`Wrong constant labels format, should be "key=value"`, "input", p) continue } key := strings.TrimSpace(keyValue[0]) value := strings.TrimSpace(keyValue[1]) if key == "" || value == "" { continue } labels[key] = value } return labels } // NewExporter returns a new PostgreSQL exporter for the provided DSN. 
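// A minimal usage sketch (the DSN and option values here are illustrative only):
//
//	exporter := NewExporter(
//		[]string{"postgresql://postgres:password@localhost:5432/postgres?sslmode=disable"},
//		DisableSettingsMetrics(true),
//		WithConstantLabels("cluster=demo"),
//	)
//	prometheus.MustRegister(exporter)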
func NewExporter(dsn []string, opts ...ExporterOpt) *Exporter { e := &Exporter{ dsn: dsn, builtinMetricMaps: builtinMetricMaps, } for _, opt := range opts { opt(e) } e.setupInternalMetrics() e.servers = NewServers(ServerWithLabels(e.constantLabels)) return e } func (e *Exporter) setupInternalMetrics() { e.duration = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: exporter, Name: "last_scrape_duration_seconds", Help: "Duration of the last scrape of metrics from PostgresSQL.", ConstLabels: e.constantLabels, }) e.totalScrapes = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: exporter, Name: "scrapes_total", Help: "Total number of times PostgresSQL was scraped for metrics.", ConstLabels: e.constantLabels, }) e.error = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: exporter, Name: "last_scrape_error", Help: "Whether the last scrape of metrics from PostgreSQL resulted in an error (1 for error, 0 for success).", ConstLabels: e.constantLabels, }) e.psqlUp = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: "up", Help: "Whether the last scrape of metrics from PostgreSQL was able to connect to the server (1 for yes, 0 for no).", ConstLabels: e.constantLabels, }) e.userQueriesError = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: exporter, Name: "user_queries_load_error", Help: "Whether the user queries file was loaded and parsed successfully (1 for error, 0 for success).", ConstLabels: e.constantLabels, }, []string{"filename", "hashsum"}) } // Describe implements prometheus.Collector. func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { } // Collect implements prometheus.Collector. func (e *Exporter) Collect(ch chan<- prometheus.Metric) { e.scrape(ch) ch <- e.duration ch <- e.totalScrapes ch <- e.error ch <- e.psqlUp e.userQueriesError.Collect(ch) } func newDesc(subsystem, name, help string, labels prometheus.Labels) *prometheus.Desc { return prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, name), help, nil, labels, ) } func checkPostgresVersion(db *sql.DB, server string) (semver.Version, string, error) { level.Debug(logger).Log("msg", "Querying PostgreSQL version", "server", server) versionRow := db.QueryRow("SELECT version();") var versionString string err := versionRow.Scan(&versionString) if err != nil { return semver.Version{}, "", fmt.Errorf("Error scanning version string on %q: %v", server, err) } semanticVersion, err := parseVersion(versionString) if err != nil { return semver.Version{}, "", fmt.Errorf("Error parsing version string on %q: %v", server, err) } return semanticVersion, versionString, nil } // Check and update the exporters query maps if the version has changed. func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, server *Server) error { semanticVersion, versionString, err := checkPostgresVersion(server.db, server.String()) if err != nil { return fmt.Errorf("Error fetching version string on %q: %v", server, err) } if !e.disableDefaultMetrics && semanticVersion.LT(lowestSupportedVersion) { level.Warn(logger).Log("msg", "PostgreSQL version is lower than our lowest supported version", "server", server, "version", semanticVersion, "lowest_supported_version", lowestSupportedVersion) } // Check if semantic version changed and recalculate maps if needed. 
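// On the very first scrape server.lastMapVersion is the zero semver.Version
// ("0.0.0"), so any real server version compares not-equal and the initial
// metric map is built.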
if semanticVersion.NE(server.lastMapVersion) || server.metricMap == nil { level.Info(logger).Log("msg", "Semantic version changed", "server", server, "from", server.lastMapVersion, "to", semanticVersion) server.mappingMtx.Lock() // Get Default Metrics only for master database if !e.disableDefaultMetrics && server.master { server.metricMap = makeDescMap(semanticVersion, server.labels, e.builtinMetricMaps) server.queryOverrides = makeQueryOverrideMap(semanticVersion, queryOverrides) } else { server.metricMap = make(map[string]MetricMapNamespace) server.queryOverrides = make(map[string]string) } server.lastMapVersion = semanticVersion if e.userQueriesPath != "" { // Clear the metric while a reload is happening e.userQueriesError.Reset() // Calculate the hashsum of the useQueries userQueriesData, err := ioutil.ReadFile(e.userQueriesPath) if err != nil { level.Error(logger).Log("msg", "Failed to reload user queries", "path", e.userQueriesPath, "err", err) e.userQueriesError.WithLabelValues(e.userQueriesPath, "").Set(1) } else { hashsumStr := fmt.Sprintf("%x", sha256.Sum256(userQueriesData)) if err := addQueries(userQueriesData, semanticVersion, server); err != nil { level.Error(logger).Log("msg", "Failed to reload user queries", "path", e.userQueriesPath, "err", err) e.userQueriesError.WithLabelValues(e.userQueriesPath, hashsumStr).Set(1) } else { // Mark user queries as successfully loaded e.userQueriesError.WithLabelValues(e.userQueriesPath, hashsumStr).Set(0) } } } server.mappingMtx.Unlock() } // Output the version as a special metric only for master database versionDesc := prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, staticLabelName), "Version string as reported by postgres", []string{"version", "short_version"}, server.labels) if !e.disableDefaultMetrics && server.master { ch <- prometheus.MustNewConstMetric(versionDesc, prometheus.UntypedValue, 1, versionString, semanticVersion.String()) } return nil } func (e *Exporter) scrape(ch chan<- prometheus.Metric) { defer func(begun time.Time) { e.duration.Set(time.Since(begun).Seconds()) }(time.Now()) e.totalScrapes.Inc() dsns := e.dsn if e.autoDiscoverDatabases { dsns = e.discoverDatabaseDSNs() } var errorsCount int var connectionErrorsCount int for _, dsn := range dsns { if err := e.scrapeDSN(ch, dsn); err != nil { errorsCount++ level.Error(logger).Log("err", err) if _, ok := err.(*ErrorConnectToServer); ok { connectionErrorsCount++ } } } switch { case connectionErrorsCount >= len(dsns): e.psqlUp.Set(0) default: e.psqlUp.Set(1) // Didn't fail, can mark connection as up for this scrape. } switch errorsCount { case 0: e.error.Set(0) default: e.error.Set(1) } } prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/postgres_exporter_integration_test.go000066400000000000000000000112361417033021000336200ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // These are specialized integration tests. 
We only build them when we're doing // a lot of additional work to keep the external docker environment they require // working. //go:build integration // +build integration package main import ( "fmt" "os" "strings" "testing" _ "github.com/lib/pq" "github.com/prometheus/client_golang/prometheus" . "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. func Test(t *testing.T) { TestingT(t) } type IntegrationSuite struct { e *Exporter } var _ = Suite(&IntegrationSuite{}) func (s *IntegrationSuite) SetUpSuite(c *C) { dsn := os.Getenv("DATA_SOURCE_NAME") c.Assert(dsn, Not(Equals), "") exporter := NewExporter(strings.Split(dsn, ",")) c.Assert(exporter, NotNil) // Assign the exporter to the suite s.e = exporter prometheus.MustRegister(exporter) } // TODO: it would be nice if this didn't mostly just recreate the scrape function func (s *IntegrationSuite) TestAllNamespacesReturnResults(c *C) { // Setup a dummy channel to consume metrics ch := make(chan prometheus.Metric, 100) go func() { for range ch { } }() for _, dsn := range s.e.dsn { // Open a database connection server, err := NewServer(dsn) c.Assert(server, NotNil) c.Assert(err, IsNil) // Do a version update err = s.e.checkMapVersions(ch, server) c.Assert(err, IsNil) err = querySettings(ch, server) if !c.Check(err, Equals, nil) { fmt.Println("## ERRORS FOUND") fmt.Println(err) } // This should never happen in our test cases. errMap := queryNamespaceMappings(ch, server) if !c.Check(len(errMap), Equals, 0) { fmt.Println("## NAMESPACE ERRORS FOUND") for namespace, err := range errMap { fmt.Println(namespace, ":", err) } } server.Close() } } // TestInvalidDsnDoesntCrash tests that specifying an invalid DSN doesn't crash // the exporter. Related to https://github.com/prometheus-community/postgres_exporter/issues/93 // although not a replication of the scenario. func (s *IntegrationSuite) TestInvalidDsnDoesntCrash(c *C) { // Setup a dummy channel to consume metrics ch := make(chan prometheus.Metric, 100) go func() { for range ch { } }() // Send a bad DSN exporter := NewExporter([]string{"invalid dsn"}) c.Assert(exporter, NotNil) exporter.scrape(ch) // Send a DSN to a non-listening port. exporter = NewExporter([]string{"postgresql://nothing:nothing@127.0.0.1:1/nothing"}) c.Assert(exporter, NotNil) exporter.scrape(ch) } // TestUnknownMetricParsingDoesntCrash deliberately deletes all the column maps out // of an exporter to test that the default metric handling code can cope with unknown columns. func (s *IntegrationSuite) TestUnknownMetricParsingDoesntCrash(c *C) { // Setup a dummy channel to consume metrics ch := make(chan prometheus.Metric, 100) go func() { for range ch { } }() dsn := os.Getenv("DATA_SOURCE_NAME") c.Assert(dsn, Not(Equals), "") exporter := NewExporter(strings.Split(dsn, ",")) c.Assert(exporter, NotNil) // Convert the default maps into a list of empty maps. emptyMaps := make(map[string]intermediateMetricMap, 0) for k := range exporter.builtinMetricMaps { emptyMaps[k] = intermediateMetricMap{ map[string]ColumnMapping{}, true, 0, } } exporter.builtinMetricMaps = emptyMaps // scrape the exporter and make sure it works exporter.scrape(ch) } // TestExtendQueriesDoesntCrash tests that specifying extend.query-path doesn't // crash. 
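// A user queries file such as the one referenced below follows the
// extend.query-path YAML layout; a rough sketch (the namespace, query and
// metric names here are illustrative, not part of this repository):
//
//	pg_postmaster:
//	  query: "SELECT pg_postmaster_start_time as start_time_seconds from pg_postmaster_start_time()"
//	  metrics:
//	    - start_time_seconds:
//	        usage: "GAUGE"
//	        description: "Time at which postmaster started"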
func (s *IntegrationSuite) TestExtendQueriesDoesntCrash(c *C) { // Setup a dummy channel to consume metrics ch := make(chan prometheus.Metric, 100) go func() { for range ch { } }() dsn := os.Getenv("DATA_SOURCE_NAME") c.Assert(dsn, Not(Equals), "") exporter := NewExporter( strings.Split(dsn, ","), WithUserQueriesPath("../user_queries_test.yaml"), ) c.Assert(exporter, NotNil) // scrape the exporter and make sure it works exporter.scrape(ch) } func (s *IntegrationSuite) TestAutoDiscoverDatabases(c *C) { dsn := os.Getenv("DATA_SOURCE_NAME") exporter := NewExporter( strings.Split(dsn, ","), ) c.Assert(exporter, NotNil) dsns := exporter.discoverDatabaseDSNs() c.Assert(len(dsns), Equals, 2) } prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/postgres_exporter_test.go000066400000000000000000000247201417033021000312170ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !integration // +build !integration package main import ( "io/ioutil" "math" "os" "reflect" "testing" "time" "github.com/blang/semver" "github.com/prometheus/client_golang/prometheus" . "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. func Test(t *testing.T) { TestingT(t) } type FunctionalSuite struct { } var _ = Suite(&FunctionalSuite{}) func (s *FunctionalSuite) SetUpSuite(c *C) { } func (s *FunctionalSuite) TestSemanticVersionColumnDiscard(c *C) { testMetricMap := map[string]intermediateMetricMap{ "test_namespace": { map[string]ColumnMapping{ "metric_which_stays": {COUNTER, "This metric should not be eliminated", nil, nil}, "metric_which_discards": {COUNTER, "This metric should be forced to DISCARD", nil, nil}, }, true, 0, }, } { // No metrics should be eliminated resultMap := makeDescMap(semver.MustParse("0.0.1"), prometheus.Labels{}, testMetricMap) c.Check( resultMap["test_namespace"].columnMappings["metric_which_stays"].discard, Equals, false, ) c.Check( resultMap["test_namespace"].columnMappings["metric_which_discards"].discard, Equals, false, ) } // nolint: dupl { // Update the map so the discard metric should be eliminated discardableMetric := testMetricMap["test_namespace"].columnMappings["metric_which_discards"] discardableMetric.supportedVersions = semver.MustParseRange(">0.0.1") testMetricMap["test_namespace"].columnMappings["metric_which_discards"] = discardableMetric // Discard metric should be discarded resultMap := makeDescMap(semver.MustParse("0.0.1"), prometheus.Labels{}, testMetricMap) c.Check( resultMap["test_namespace"].columnMappings["metric_which_stays"].discard, Equals, false, ) c.Check( resultMap["test_namespace"].columnMappings["metric_which_discards"].discard, Equals, true, ) } // nolint: dupl { // Update the map so the discard metric should be kept but has a version discardableMetric := testMetricMap["test_namespace"].columnMappings["metric_which_discards"] discardableMetric.supportedVersions = semver.MustParseRange(">0.0.1") testMetricMap["test_namespace"].columnMappings["metric_which_discards"] = 
discardableMetric // Discard metric should now be kept, since its supported version range matches resultMap := makeDescMap(semver.MustParse("0.0.2"), prometheus.Labels{}, testMetricMap) c.Check( resultMap["test_namespace"].columnMappings["metric_which_stays"].discard, Equals, false, ) c.Check( resultMap["test_namespace"].columnMappings["metric_which_discards"].discard, Equals, false, ) } } // test read username and password from file func (s *FunctionalSuite) TestEnvironmentSettingWithSecretsFiles(c *C) { err := os.Setenv("DATA_SOURCE_USER_FILE", "./tests/username_file") c.Assert(err, IsNil) defer UnsetEnvironment(c, "DATA_SOURCE_USER_FILE") err = os.Setenv("DATA_SOURCE_PASS_FILE", "./tests/userpass_file") c.Assert(err, IsNil) defer UnsetEnvironment(c, "DATA_SOURCE_PASS_FILE") err = os.Setenv("DATA_SOURCE_URI", "localhost:5432/?sslmode=disable") c.Assert(err, IsNil) defer UnsetEnvironment(c, "DATA_SOURCE_URI") var expected = "postgresql://custom_username$&+,%2F%3A;=%3F%40:custom_password$&+,%2F%3A;=%3F%40@localhost:5432/?sslmode=disable" dsn, err := getDataSources() if err != nil { c.Errorf("Unexpected error reading datasources") } if len(dsn) == 0 { c.Errorf("Expected one data source, zero found") } if dsn[0] != expected { c.Errorf("Expected Username to be read from file. Found=%v, expected=%v", dsn[0], expected) } } // test read DATA_SOURCE_NAME from environment func (s *FunctionalSuite) TestEnvironmentSettingWithDns(c *C) { envDsn := "postgresql://user:password@localhost:5432/?sslmode=enabled" err := os.Setenv("DATA_SOURCE_NAME", envDsn) c.Assert(err, IsNil) defer UnsetEnvironment(c, "DATA_SOURCE_NAME") dsn, err := getDataSources() if err != nil { c.Errorf("Unexpected error reading datasources") } if len(dsn) == 0 { c.Errorf("Expected one data source, zero found") } if dsn[0] != envDsn { c.Errorf("Expected DSN to be read from environment. Found=%v, expected=%v", dsn[0], envDsn) } } // test DATA_SOURCE_NAME is used even if username and password environment variables are set func (s *FunctionalSuite) TestEnvironmentSettingWithDnsAndSecrets(c *C) { envDsn := "postgresql://userDsn:passwordDsn@localhost:55432/?sslmode=disabled" err := os.Setenv("DATA_SOURCE_NAME", envDsn) c.Assert(err, IsNil) defer UnsetEnvironment(c, "DATA_SOURCE_NAME") err = os.Setenv("DATA_SOURCE_USER_FILE", "./tests/username_file") c.Assert(err, IsNil) defer UnsetEnvironment(c, "DATA_SOURCE_USER_FILE") err = os.Setenv("DATA_SOURCE_PASS", "envUserPass") c.Assert(err, IsNil) defer UnsetEnvironment(c, "DATA_SOURCE_PASS") dsn, err := getDataSources() if err != nil { c.Errorf("Unexpected error reading datasources") } if len(dsn) == 0 { c.Errorf("Expected one data source, zero found") } if dsn[0] != envDsn { c.Errorf("Expected DATA_SOURCE_NAME to take precedence. 
Found=%v, expected=%v", dsn[0], envDsn) } } func (s *FunctionalSuite) TestPostgresVersionParsing(c *C) { type TestCase struct { input string expected string } cases := []TestCase{ { input: "PostgreSQL 10.1 on x86_64-pc-linux-gnu, compiled by gcc (Debian 6.3.0-18) 6.3.0 20170516, 64-bit", expected: "10.1.0", }, { input: "PostgreSQL 9.5.4, compiled by Visual C++ build 1800, 64-bit", expected: "9.5.4", }, { input: "EnterpriseDB 9.6.5.10 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16), 64-bit", expected: "9.6.5", }, } for _, cs := range cases { ver, err := parseVersion(cs.input) c.Assert(err, IsNil) c.Assert(ver.String(), Equals, cs.expected) } } func (s *FunctionalSuite) TestParseFingerprint(c *C) { cases := []struct { url string fingerprint string err string }{ { url: "postgresql://userDsn:passwordDsn@localhost:55432/?sslmode=disabled", fingerprint: "localhost:55432", }, { url: "postgresql://userDsn:passwordDsn%3D@localhost:55432/?sslmode=disabled", fingerprint: "localhost:55432", }, { url: "port=1234", fingerprint: "localhost:1234", }, { url: "host=example", fingerprint: "example:5432", }, { url: "xyz", err: "malformed dsn \"xyz\"", }, } for _, cs := range cases { f, err := parseFingerprint(cs.url) if cs.err == "" { c.Assert(err, IsNil) } else { c.Assert(err, NotNil) c.Assert(err.Error(), Equals, cs.err) } c.Assert(f, Equals, cs.fingerprint) } } func (s *FunctionalSuite) TestParseConstLabels(c *C) { cases := []struct { s string labels prometheus.Labels }{ { s: "a=b", labels: prometheus.Labels{ "a": "b", }, }, { s: "", labels: prometheus.Labels{}, }, { s: "a=b, c=d", labels: prometheus.Labels{ "a": "b", "c": "d", }, }, { s: "a=b, xyz", labels: prometheus.Labels{ "a": "b", }, }, { s: "a=", labels: prometheus.Labels{}, }, } for _, cs := range cases { labels := parseConstLabels(cs.s) if !reflect.DeepEqual(labels, cs.labels) { c.Fatalf("labels not equal (%v -> %v)", labels, cs.labels) } } } func UnsetEnvironment(c *C, d string) { err := os.Unsetenv(d) c.Assert(err, IsNil) } type isNaNChecker struct { *CheckerInfo } var IsNaN Checker = &isNaNChecker{ &CheckerInfo{Name: "IsNaN", Params: []string{"value"}}, } func (checker *isNaNChecker) Check(params []interface{}, names []string) (result bool, error string) { param, ok := (params[0]).(float64) if !ok { return false, "obtained value type is not a float" } return math.IsNaN(param), "" } // test boolean metric type gets converted to float func (s *FunctionalSuite) TestBooleanConversionToValueAndString(c *C) { type TestCase struct { input interface{} expectedString string expectedValue float64 expectedCount uint64 expectedOK bool } cases := []TestCase{ { input: true, expectedString: "true", expectedValue: 1.0, expectedCount: 1, expectedOK: true, }, { input: false, expectedString: "false", expectedValue: 0.0, expectedCount: 0, expectedOK: true, }, { input: nil, expectedString: "", expectedValue: math.NaN(), expectedCount: 0, expectedOK: true, }, { input: TestCase{}, expectedString: "", expectedValue: math.NaN(), expectedCount: 0, expectedOK: false, }, { input: 123.0, expectedString: "123", expectedValue: 123.0, expectedCount: 123, expectedOK: true, }, { input: "123", expectedString: "123", expectedValue: 123.0, expectedCount: 123, expectedOK: true, }, { input: []byte("123"), expectedString: "123", expectedValue: 123.0, expectedCount: 123, expectedOK: true, }, { input: time.Unix(1600000000, 0), expectedString: "1600000000", expectedValue: 1600000000.0, expectedCount: 1600000000, expectedOK: true, }, } for _, cs := range 
cases { value, ok := dbToFloat64(cs.input) if math.IsNaN(cs.expectedValue) { c.Assert(value, IsNaN) } else { c.Assert(value, Equals, cs.expectedValue) } c.Assert(ok, Equals, cs.expectedOK) count, ok := dbToUint64(cs.input) c.Assert(count, Equals, cs.expectedCount) c.Assert(ok, Equals, cs.expectedOK) str, ok := dbToString(cs.input) c.Assert(str, Equals, cs.expectedString) c.Assert(ok, Equals, cs.expectedOK) } } func (s *FunctionalSuite) TestParseUserQueries(c *C) { userQueriesData, err := ioutil.ReadFile("./tests/user_queries_ok.yaml") if err == nil { metricMaps, newQueryOverrides, err := parseUserQueries(userQueriesData) c.Assert(err, Equals, nil) c.Assert(metricMaps, NotNil) c.Assert(newQueryOverrides, NotNil) if len(metricMaps) != 2 { c.Errorf("Expected 2 metrics from user file, got %d", len(metricMaps)) } } } prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/queries.go000066400000000000000000000225261417033021000260410ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "errors" "fmt" "github.com/blang/semver" "github.com/go-kit/log/level" "gopkg.in/yaml.v2" ) // UserQuery represents a user-defined query type UserQuery struct { Query string `yaml:"query"` Metrics []Mapping `yaml:"metrics"` Master bool `yaml:"master"` // Run the query only against the master database CacheSeconds uint64 `yaml:"cache_seconds"` // Number of seconds to cache the namespace result metrics for. RunOnServer string `yaml:"runonserver"` // Server version range the query should run on } // UserQueries represents a set of UserQuery objects type UserQueries map[string]UserQuery // OverrideQuery's are run in place of simple namespace lookups, and provide // advanced functionality. But they have a tendency to be postgres-version-specific. // There aren't too many versions, so we simply store customized versions using // the semver matching we do for columns. type OverrideQuery struct { versionRange semver.Range query string } // Overriding queries for namespaces above. 
// TODO: validate this is a closed set in tests, and there are no overlaps var queryOverrides = map[string][]OverrideQuery{ "pg_locks": { { semver.MustParseRange(">0.0.0"), `SELECT pg_database.datname,tmp.mode,COALESCE(count,0) as count FROM ( VALUES ('accesssharelock'), ('rowsharelock'), ('rowexclusivelock'), ('shareupdateexclusivelock'), ('sharelock'), ('sharerowexclusivelock'), ('exclusivelock'), ('accessexclusivelock'), ('sireadlock') ) AS tmp(mode) CROSS JOIN pg_database LEFT JOIN (SELECT database, lower(mode) AS mode,count(*) AS count FROM pg_locks WHERE database IS NOT NULL GROUP BY database, lower(mode) ) AS tmp2 ON tmp.mode=tmp2.mode and pg_database.oid = tmp2.database ORDER BY 1`, }, }, "pg_stat_replication": { { semver.MustParseRange(">=10.0.0"), ` SELECT *, (case pg_is_in_recovery() when 't' then null else pg_current_wal_lsn() end) AS pg_current_wal_lsn, (case pg_is_in_recovery() when 't' then null else pg_wal_lsn_diff(pg_current_wal_lsn(), pg_lsn('0/0'))::float end) AS pg_current_wal_lsn_bytes, (case pg_is_in_recovery() when 't' then null else pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn)::float end) AS pg_wal_lsn_diff FROM pg_stat_replication `, }, { semver.MustParseRange(">=9.2.0 <10.0.0"), ` SELECT *, (case pg_is_in_recovery() when 't' then null else pg_current_xlog_location() end) AS pg_current_xlog_location, (case pg_is_in_recovery() when 't' then null else pg_xlog_location_diff(pg_current_xlog_location(), replay_location)::float end) AS pg_xlog_location_diff FROM pg_stat_replication `, }, { semver.MustParseRange("<9.2.0"), ` SELECT *, (case pg_is_in_recovery() when 't' then null else pg_current_xlog_location() end) AS pg_current_xlog_location FROM pg_stat_replication `, }, }, "pg_replication_slots": { { semver.MustParseRange(">=9.4.0 <10.0.0"), ` SELECT slot_name, database, active, pg_xlog_location_diff(pg_current_xlog_location(), restart_lsn) FROM pg_replication_slots `, }, { semver.MustParseRange(">=10.0.0"), ` SELECT slot_name, database, active, pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn) FROM pg_replication_slots `, }, }, "pg_stat_archiver": { { semver.MustParseRange(">=0.0.0"), ` SELECT *, extract(epoch from now() - last_archived_time) AS last_archive_age FROM pg_stat_archiver `, }, }, "pg_stat_activity": { // This query only works on PostgreSQL 9.2 and newer; older versions fall back to the 'unknown'-state variant below. { semver.MustParseRange(">=9.2.0"), ` SELECT pg_database.datname, tmp.state, COALESCE(count,0) as count, COALESCE(max_tx_duration,0) as max_tx_duration FROM ( VALUES ('active'), ('idle'), ('idle in transaction'), ('idle in transaction (aborted)'), ('fastpath function call'), ('disabled') ) AS tmp(state) CROSS JOIN pg_database LEFT JOIN ( SELECT datname, state, count(*) AS count, MAX(EXTRACT(EPOCH FROM now() - xact_start))::float AS max_tx_duration FROM pg_stat_activity GROUP BY datname,state) AS tmp2 ON tmp.state = tmp2.state AND pg_database.datname = tmp2.datname `, }, { semver.MustParseRange("<9.2.0"), ` SELECT datname, 'unknown' AS state, COALESCE(count(*),0) AS count, COALESCE(MAX(EXTRACT(EPOCH FROM now() - xact_start))::float,0) AS max_tx_duration FROM pg_stat_activity GROUP BY datname `, }, }, } // Convert the query override file to the version-specific query override file // for the exporter. func makeQueryOverrideMap(pgVersion semver.Version, queryOverrides map[string][]OverrideQuery) map[string]string { resultMap := make(map[string]string) for name, overrideDef := range queryOverrides { // Find a matching semver. We make it an error to have overlapping // ranges at test-time, so only 1 should ever match. 
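// For example, a server reporting version 9.6.5 matches the ">=9.2.0 <10.0.0"
// variant of the pg_stat_replication override above, while a 10.x server
// matches the ">=10.0.0" variant.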
matched := false for _, queryDef := range overrideDef { if queryDef.versionRange(pgVersion) { resultMap[name] = queryDef.query matched = true break } } if !matched { level.Warn(logger).Log("msg", "No query matched override, disabling metric space", "name", name) resultMap[name] = "" } } return resultMap } func parseUserQueries(content []byte) (map[string]intermediateMetricMap, map[string]string, error) { var userQueries UserQueries err := yaml.Unmarshal(content, &userQueries) if err != nil { return nil, nil, err } // Stores the loaded map representation metricMaps := make(map[string]intermediateMetricMap) newQueryOverrides := make(map[string]string) for metric, specs := range userQueries { level.Debug(logger).Log("msg", "New user metric namespace from YAML metric", "metric", metric, "cache_seconds", specs.CacheSeconds) newQueryOverrides[metric] = specs.Query metricMap, ok := metricMaps[metric] if !ok { // Namespace for metric not found - add it. newMetricMap := make(map[string]ColumnMapping) metricMap = intermediateMetricMap{ columnMappings: newMetricMap, master: specs.Master, cacheSeconds: specs.CacheSeconds, } metricMaps[metric] = metricMap } for _, metric := range specs.Metrics { for name, mappingOption := range metric { var columnMapping ColumnMapping tmpUsage, _ := stringToColumnUsage(mappingOption.Usage) columnMapping.usage = tmpUsage columnMapping.description = mappingOption.Description // TODO: we should support a custom mapping here columnMapping.mapping = nil // Should we support this for users? columnMapping.supportedVersions = nil metricMap.columnMappings[name] = columnMapping } } } return metricMaps, newQueryOverrides, nil } // Add queries to the builtinMetricMaps and queryOverrides maps. Added queries do not // respect version requirements, because it is assumed that the user knows // what they are doing with their version of postgres. // // This function modifies metricMap and queryOverrideMap to contain the new // queries. // TODO: test code for all of this. // TODO: the YAML this supports is "non-standard" - we should move away from it. 
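// A minimal calling sketch (the file path and server version here are
// illustrative, not fixed by this package):
//
//	content, err := ioutil.ReadFile("user_queries.yaml")
//	if err != nil {
//		return err
//	}
//	return addQueries(content, semver.MustParse("10.1.0"), server)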
func addQueries(content []byte, pgVersion semver.Version, server *Server) error { metricMaps, newQueryOverrides, err := parseUserQueries(content) if err != nil { return err } // Convert the loaded metric map into exporter representation partialExporterMap := makeDescMap(pgVersion, server.labels, metricMaps) // Merge the two maps (which are now quite flattened) for k, v := range partialExporterMap { _, found := server.metricMap[k] if found { level.Debug(logger).Log("msg", "Overriding metric from user YAML file", "metric", k) } else { level.Debug(logger).Log("msg", "Adding new metric from user YAML file", "metric", k) } server.metricMap[k] = v } // Merge the query override map for k, v := range newQueryOverrides { _, found := server.queryOverrides[k] if found { level.Debug(logger).Log("msg", "Overriding query override from user YAML file", "query_override", k) } else { level.Debug(logger).Log("msg", "Adding new query override from user YAML file", "query_override", k) } server.queryOverrides[k] = v } return nil } func queryDatabases(server *Server) ([]string, error) { rows, err := server.db.Query("SELECT datname FROM pg_database WHERE datallowconn = true AND datistemplate = false AND datname != current_database()") if err != nil { return nil, fmt.Errorf("Error retrieving databases: %v", err) } defer rows.Close() // nolint: errcheck var databaseName string result := make([]string, 0) for rows.Next() { err = rows.Scan(&databaseName) if err != nil { return nil, errors.New(fmt.Sprintln("Error retrieving rows:", err)) } result = append(result, databaseName) } return result, nil } prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/server.go000066400000000000000000000111001417033021000256640ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "database/sql" "fmt" "sync" "time" "github.com/blang/semver" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) // Server describes a connection to Postgres. // It also holds the metric map and query overrides for that connection. type Server struct { db *sql.DB labels prometheus.Labels master bool runonserver string // Last version used to calculate metric map. If mismatch on scrape, // then maps are recalculated. lastMapVersion semver.Version // Currently active metric map metricMap map[string]MetricMapNamespace // Currently active query overrides queryOverrides map[string]string mappingMtx sync.RWMutex // Currently cached metrics metricCache map[string]cachedMetrics cacheMtx sync.Mutex } // ServerOpt configures a server. type ServerOpt func(*Server) // ServerWithLabels configures a set of labels. func ServerWithLabels(labels prometheus.Labels) ServerOpt { return func(s *Server) { for k, v := range labels { s.labels[k] = v } } } // NewServer establishes a new connection using DSN. 
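// A minimal usage sketch (the DSN below is illustrative only):
//
//	server, err := NewServer("postgresql://postgres:secret@localhost:5432/postgres?sslmode=disable",
//		ServerWithLabels(prometheus.Labels{"cluster": "example"}))
//	if err != nil {
//		return err
//	}
//	defer server.Close()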
func NewServer(dsn string, opts ...ServerOpt) (*Server, error) { fingerprint, err := parseFingerprint(dsn) if err != nil { return nil, err } db, err := sql.Open("postgres", dsn) if err != nil { return nil, err } db.SetMaxOpenConns(1) db.SetMaxIdleConns(1) level.Info(logger).Log("msg", "Established new database connection", "fingerprint", fingerprint) s := &Server{ db: db, master: false, labels: prometheus.Labels{ serverLabelName: fingerprint, }, metricCache: make(map[string]cachedMetrics), } for _, opt := range opts { opt(s) } return s, nil } // Close disconnects from Postgres. func (s *Server) Close() error { return s.db.Close() } // Ping checks connection availability and possibly invalidates the connection if it fails. func (s *Server) Ping() error { if err := s.db.Ping(); err != nil { if cerr := s.Close(); cerr != nil { level.Error(logger).Log("msg", "Error while closing non-pinging DB connection", "server", s, "err", cerr) } return err } return nil } // String returns server's fingerprint. func (s *Server) String() string { return s.labels[serverLabelName] } // Scrape loads metrics. func (s *Server) Scrape(ch chan<- prometheus.Metric, disableSettingsMetrics bool) error { s.mappingMtx.RLock() defer s.mappingMtx.RUnlock() var err error if !disableSettingsMetrics && s.master { if err = querySettings(ch, s); err != nil { err = fmt.Errorf("error retrieving settings: %s", err) } } errMap := queryNamespaceMappings(ch, s) if len(errMap) > 0 { err = fmt.Errorf("queryNamespaceMappings returned %d errors", len(errMap)) } return err } // Servers contains a collection of servers to Postgres. type Servers struct { m sync.Mutex servers map[string]*Server opts []ServerOpt } // NewServers creates a collection of servers to Postgres. func NewServers(opts ...ServerOpt) *Servers { return &Servers{ servers: make(map[string]*Server), opts: opts, } } // GetServer returns established connection from a collection. func (s *Servers) GetServer(dsn string) (*Server, error) { s.m.Lock() defer s.m.Unlock() var err error var ok bool errCount := 0 // start at zero because we increment before doing work retries := 1 var server *Server for { if errCount++; errCount > retries { return nil, err } server, ok = s.servers[dsn] if !ok { server, err = NewServer(dsn, s.opts...) if err != nil { time.Sleep(time.Duration(errCount) * time.Second) continue } s.servers[dsn] = server } if err = server.Ping(); err != nil { delete(s.servers, dsn) time.Sleep(time.Duration(errCount) * time.Second) continue } break } return server, nil } // Close disconnects from all known servers. 
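// Errors encountered while closing individual connections are logged rather
// than returned, so a single failing connection cannot block shutdown of the rest.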
func (s *Servers) Close() { s.m.Lock() defer s.m.Unlock() for _, server := range s.servers { if err := server.Close(); err != nil { level.Error(logger).Log("msg", "Failed to close connection", "server", server, "err", err) } } } prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/tests/000077500000000000000000000000001417033021000251705ustar00rootroot00000000000000prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/tests/docker-postgres-replication/000077500000000000000000000000001417033021000326125ustar00rootroot00000000000000Dockerfile000077500000000000000000000006131417033021000345300ustar00rootroot00000000000000prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/tests/docker-postgres-replicationFROM postgres:11 MAINTAINER Daniel Dent (https://www.danieldent.com) ENV PG_MAX_WAL_SENDERS 8 ENV PG_WAL_KEEP_SEGMENTS 8 RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y inetutils-ping COPY setup-replication.sh /docker-entrypoint-initdb.d/ COPY docker-entrypoint.sh /docker-entrypoint.sh RUN chmod +x /docker-entrypoint-initdb.d/setup-replication.sh /docker-entrypoint.sh Dockerfile.p2000066400000000000000000000006241417033021000350470ustar00rootroot00000000000000prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/tests/docker-postgres-replicationFROM postgres:{{VERSION}} MAINTAINER Daniel Dent (https://www.danieldent.com) ENV PG_MAX_WAL_SENDERS 8 ENV PG_WAL_KEEP_SEGMENTS 8 RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y inetutils-ping COPY setup-replication.sh /docker-entrypoint-initdb.d/ COPY docker-entrypoint.sh /docker-entrypoint.sh RUN chmod +x /docker-entrypoint-initdb.d/setup-replication.sh /docker-entrypoint.sh README.md000066400000000000000000000006131417033021000340120ustar00rootroot00000000000000prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/tests/docker-postgres-replication# Replicated postgres cluster in docker. Upstream is forked from https://github.com/DanielDent/docker-postgres-replication My version lives at https://github.com/wrouesnel/docker-postgres-replication This very simple docker-compose file lets us stand up a replicated postgres cluster so we can test streaming. # TODO: Pull in p2 and template the Dockerfile so we can test multiple versions. docker-compose.yml000066400000000000000000000012631417033021000361720ustar00rootroot00000000000000prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/tests/docker-postgres-replication version: '2' services: pg-master: build: '.' image: 'danieldent/postgres-replication' restart: 'always' environment: POSTGRES_USER: 'postgres' POSTGRES_PASSWORD: 'postgres' PGDATA: '/var/lib/postgresql/data/pgdata' volumes: - '/var/lib/postgresql/data' expose: - '5432' pg-slave: build: '.' 
image: 'danieldent/postgres-replication' restart: 'always' environment: POSTGRES_USER: 'postgres' POSTGRES_PASSWORD: 'postgres' PGDATA: '/var/lib/postgresql/data/pgdata' REPLICATE_FROM: 'pg-master' volumes: - '/var/lib/postgresql/data' expose: - '5432' links: - 'pg-master' docker-entrypoint.sh000077500000000000000000000076161417033021000365640ustar00rootroot00000000000000prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/tests/docker-postgres-replication#!/bin/bash # Backwards compatibility for old variable names (deprecated) if [ "x$PGUSER" != "x" ]; then POSTGRES_USER=$PGUSER fi if [ "x$PGPASSWORD" != "x" ]; then POSTGRES_PASSWORD=$PGPASSWORD fi # Forwards-compatibility for old variable names (pg_basebackup uses them) if [ "x$PGPASSWORD" = "x" ]; then export PGPASSWORD=$POSTGRES_PASSWORD fi # Based on official postgres package's entrypoint script (https://hub.docker.com/_/postgres/) # Modified to be able to set up a slave. The docker-entrypoint-initdb.d hook provided is inadequate. set -e if [ "${1:0:1}" = '-' ]; then set -- postgres "$@" fi if [ "$1" = 'postgres' ]; then mkdir -p "$PGDATA" chmod 700 "$PGDATA" chown -R postgres "$PGDATA" mkdir -p /run/postgresql chmod g+s /run/postgresql chown -R postgres /run/postgresql # look specifically for PG_VERSION, as it is expected in the DB dir if [ ! -s "$PGDATA/PG_VERSION" ]; then if [ "x$REPLICATE_FROM" == "x" ]; then eval "gosu postgres initdb $POSTGRES_INITDB_ARGS" else until /bin/ping -c 1 -W 1 ${REPLICATE_FROM} do echo "Waiting for master to ping..." sleep 1s done until gosu postgres pg_basebackup -h ${REPLICATE_FROM} -D ${PGDATA} -U ${POSTGRES_USER} -vP -w do echo "Waiting for master to connect..." sleep 1s done fi # check password first so we can output the warning before postgres # messes it up if [ ! -z "$POSTGRES_PASSWORD" ]; then pass="PASSWORD '$POSTGRES_PASSWORD'" authMethod=md5 else # The - option suppresses leading tabs but *not* spaces. :) cat >&2 <<-'EOWARN' **************************************************** WARNING: No password has been set for the database. This will allow anyone with access to the Postgres port to access your database. In Docker's default configuration, this is effectively any other container on the same system. Use "-e POSTGRES_PASSWORD=password" to set it in "docker run". **************************************************** EOWARN pass= authMethod=trust fi if [ "x$REPLICATE_FROM" == "x" ]; then { echo; echo "host replication all 0.0.0.0/0 $authMethod"; } | gosu postgres tee -a "$PGDATA/pg_hba.conf" > /dev/null { echo; echo "host all all 0.0.0.0/0 $authMethod"; } | gosu postgres tee -a "$PGDATA/pg_hba.conf" > /dev/null # internal start of server in order to allow set-up using psql-client # does not listen on external TCP/IP and waits until start finishes gosu postgres pg_ctl -D "$PGDATA" \ -o "-c listen_addresses='localhost'" \ -w start : ${POSTGRES_USER:=postgres} : ${POSTGRES_DB:=$POSTGRES_USER} export POSTGRES_USER POSTGRES_DB psql=( "psql" "-v" "ON_ERROR_STOP=1" ) if [ "$POSTGRES_DB" != 'postgres' ]; then "${psql[@]}" --username postgres <<-EOSQL CREATE DATABASE "$POSTGRES_DB" ; EOSQL echo fi if [ "$POSTGRES_USER" = 'postgres' ]; then op='ALTER' else op='CREATE' fi "${psql[@]}" --username postgres <<-EOSQL $op USER "$POSTGRES_USER" WITH SUPERUSER $pass ; EOSQL echo fi psql+=( --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" ) echo for f in /docker-entrypoint-initdb.d/*; do case "$f" in *.sh) echo "$0: running $f"; . 
"$f" ;; *.sql) echo "$0: running $f"; "${psql[@]}" < "$f"; echo ;; *.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${psql[@]}"; echo ;; *) echo "$0: ignoring $f" ;; esac echo done if [ "x$REPLICATE_FROM" == "x" ]; then gosu postgres pg_ctl -D "$PGDATA" -m fast -w stop fi echo echo 'PostgreSQL init process complete; ready for start up.' echo fi # We need this health check so we know when it's started up. touch /tmp/.postgres_init_complete exec gosu postgres "$@" fi exec "$@" setup-replication.sh000077500000000000000000000010101417033021000365310ustar00rootroot00000000000000prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/tests/docker-postgres-replication#!/bin/bash if [ "x$REPLICATE_FROM" == "x" ]; then cat >> ${PGDATA}/postgresql.conf < ${PGDATA}/recovery.conf <&2 echo "Test Binary: $test_binary" 1>&2 [ -z "$postgres_exporter" ] && echo "Missing exporter binary" && exit 1 [ -z "$test_binary" ] && echo "Missing test binary" && exit 1 cd "$DIR" || exit 1 VERSIONS=( \ 9.4 \ 9.5 \ 9.6 \ 10 \ 11 \ ) wait_for_postgres(){ local container=$1 local ip=$2 local port=$3 if [ -z "$ip" ]; then echo "No IP specified." 1>&2 exit 1 fi if [ -z "$port" ]; then echo "No port specified." 1>&2 exit 1 fi local wait_start wait_start=$(date +%s) || exit 1 echo "Waiting for postgres to start listening..." while ! docker exec "$container" pg_isready --host="$ip" --port="$port" &> /dev/null; do if [ $(( $(date +%s) - wait_start )) -gt "$TIMEOUT" ]; then echo "Timed out waiting for postgres to start!" 1>&2 exit 1 fi sleep 1 done echo "Postgres is online at $ip:$port" } wait_for_exporter() { local wait_start wait_start=$(date +%s) || exit 1 echo "Waiting for exporter to start..." while ! nc -z localhost "$exporter_port" ; do if [ $(( $(date +%s) - wait_start )) -gt "$TIMEOUT" ]; then echo "Timed out waiting for exporter!" 1>&2 exit 1 fi sleep 1 done echo "Exporter is online at localhost:$exporter_port" } smoketest_postgres() { local version=$1 local CONTAINER_NAME=postgres_exporter-test-smoke local TIMEOUT=30 local IMAGE_NAME=postgres local CUR_IMAGE=$IMAGE_NAME:$version echo "#######################" echo "Standalone Postgres $version" echo "#######################" local docker_cmd="docker run -d -e POSTGRES_PASSWORD=$POSTGRES_PASSWORD $CUR_IMAGE" echo "Docker Cmd: $docker_cmd" CONTAINER_NAME=$($docker_cmd) standalone_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $CONTAINER_NAME) # shellcheck disable=SC2064 trap "docker logs $CONTAINER_NAME ; docker kill $CONTAINER_NAME ; docker rm -v $CONTAINER_NAME; exit 1" EXIT INT TERM wait_for_postgres "$CONTAINER_NAME" "$standalone_ip" 5432 # Run the test binary. DATA_SOURCE_NAME="postgresql://postgres:$POSTGRES_PASSWORD@$standalone_ip:5432/?sslmode=disable" $test_binary || exit $? # Extract a raw metric list. DATA_SOURCE_NAME="postgresql://postgres:$POSTGRES_PASSWORD@$standalone_ip:5432/?sslmode=disable" $postgres_exporter \ --log.level=debug --web.listen-address=:$exporter_port & exporter_pid=$! # shellcheck disable=SC2064 trap "docker logs $CONTAINER_NAME ; docker kill $CONTAINER_NAME ; docker rm -v $CONTAINER_NAME; kill $exporter_pid; exit 1" EXIT INT TERM wait_for_exporter # Dump the metrics to a file. if ! wget -q -O - http://localhost:$exporter_port/metrics 1> "$METRICS_DIR/.metrics.single.$version.prom" ; then echo "Failed on postgres $version (standalone $DOCKER_IMAGE)" 1>&2 kill $exporter_pid exit 1 fi # HACK test: check pg_up is a 1 - TODO: expand integration tests to include metric consumption if ! 
grep 'pg_up.* 1' $METRICS_DIR/.metrics.single.$version.prom ; then echo "pg_up metric was not 1 despite exporter and database being up" kill $exporter_pid exit 1 fi kill $exporter_pid docker kill "$CONTAINER_NAME" docker rm -v "$CONTAINER_NAME" trap - EXIT INT TERM echo "#######################" echo "Replicated Postgres $version" echo "#######################" old_pwd=$(pwd) cd docker-postgres-replication || exit 1 if ! VERSION="$version" p2 -t Dockerfile.p2 -o Dockerfile ; then echo "Templating failed" 1>&2 exit 1 fi trap "docker-compose logs; docker-compose down ; docker-compose rm -v; exit 1" EXIT INT TERM local compose_cmd="POSTGRES_PASSWORD=$POSTGRES_PASSWORD docker-compose up -d --force-recreate --build" echo "Compose Cmd: $compose_cmd" eval "$compose_cmd" master_container=$(docker-compose ps -q pg-master) slave_container=$(docker-compose ps -q pg-slave) master_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$master_container") slave_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$slave_container") echo "Got master IP: $master_ip" wait_for_postgres "$master_container" "$master_ip" 5432 wait_for_postgres "$slave_container" "$slave_ip" 5432 DATA_SOURCE_NAME="postgresql://postgres:$POSTGRES_PASSWORD@$master_ip:5432/?sslmode=disable" $test_binary || exit $? DATA_SOURCE_NAME="postgresql://postgres:$POSTGRES_PASSWORD@$master_ip:5432/?sslmode=disable" $postgres_exporter \ --log.level=debug --web.listen-address=:$exporter_port & exporter_pid=$! # shellcheck disable=SC2064 trap "docker-compose logs; docker-compose down ; docker-compose rm -v ; kill $exporter_pid; exit 1" EXIT INT TERM wait_for_exporter if ! wget -q -O - http://localhost:$exporter_port/metrics 1> "$METRICS_DIR/.metrics.replicated.$version.prom" ; then echo "Failed on postgres $version (replicated $DOCKER_IMAGE)" 1>&2 exit 1 fi kill $exporter_pid docker-compose down docker-compose rm -v trap - EXIT INT TERM cd "$old_pwd" || exit 1 } # Start pulling the docker images in advance for version in "${VERSIONS[@]}"; do docker pull "postgres:$version" > /dev/null & done for version in "${VERSIONS[@]}"; do echo "Testing postgres version $version" smoketest_postgres "$version" done prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/tests/user_queries_ok.yaml000066400000000000000000000022631417033021000312630ustar00rootroot00000000000000pg_locks_mode: query: "WITH q_locks AS (select * from pg_locks where pid != pg_backend_pid() and database = (select oid from pg_database where datname = current_database())) SELECT (select current_database()) as datname, lockmodes AS tag_lockmode, coalesce((select count(*) FROM q_locks WHERE mode = lockmodes), 0) AS count FROM unnest('{AccessShareLock, ExclusiveLock, RowShareLock, RowExclusiveLock, ShareLock, ShareRowExclusiveLock, AccessExclusiveLock, ShareUpdateExclusiveLock}'::text[]) lockmodes;" metrics: - datname: usage: "LABEL" description: "Database name" - tag_lockmode: usage: "LABEL" description: "Lock type" - count: usage: "GAUGE" description: "Number of lock" pg_wal: query: "select current_database() as datname, case when pg_is_in_recovery() = false then pg_xlog_location_diff(pg_current_xlog_location(), '0/0')::int8 else pg_xlog_location_diff(pg_last_xlog_replay_location(), '0/0')::int8 end as xlog_location_b;" metrics: - datname: usage: "LABEL" description: "Database name" - xlog_location_b: usage: "COUNTER" description: "current transaction log write location" 
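# The two namespaces above double as a reference for the user query format
# consumed by parseUserQueries: each top-level key names a metric namespace,
# `query` holds the SQL to run, and each `metrics` entry maps a result column
# to a usage such as LABEL, GAUGE or COUNTER. A minimal sketch follows; the
# pg_example namespace and its columns are illustrative only, not part of this
# fixture:
#
# pg_example:
#   query: "SELECT current_database() AS datname, 1 AS up"
#   master: true        # run only against the master database
#   cache_seconds: 30   # cache the namespace's results for 30 seconds
#   metrics:
#     - datname:
#         usage: "LABEL"
#         description: "Database name"
#     - up:
#         usage: "GAUGE"
#         description: "Example gauge, always 1"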
prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/tests/user_queries_test.yaml000066400000000000000000000034061417033021000316310ustar00rootroot00000000000000random: query: | WITH data AS (SELECT floor(random()*10) AS d FROM generate_series(1,100)), metrics AS (SELECT SUM(d) AS sum, COUNT(*) AS count FROM data), buckets AS (SELECT le, SUM(CASE WHEN d <= le THEN 1 ELSE 0 END) AS d FROM data, UNNEST(ARRAY[1, 2, 4, 8]) AS le GROUP BY le) SELECT sum AS histogram_sum, count AS histogram_count, ARRAY_AGG(le) AS histogram, ARRAY_AGG(d) AS histogram_bucket, ARRAY_AGG(le) AS missing, ARRAY_AGG(le) AS missing_sum, ARRAY_AGG(d) AS missing_sum_bucket, ARRAY_AGG(le) AS missing_count, ARRAY_AGG(d) AS missing_count_bucket, sum AS missing_count_sum, ARRAY_AGG(le) AS unexpected_sum, ARRAY_AGG(d) AS unexpected_sum_bucket, 'data' AS unexpected_sum_sum, ARRAY_AGG(le) AS unexpected_count, ARRAY_AGG(d) AS unexpected_count_bucket, sum AS unexpected_count_sum, 'nan'::varchar AS unexpected_count_count, ARRAY_AGG(le) AS unexpected_bytes, ARRAY_AGG(d) AS unexpected_bytes_bucket, sum AS unexpected_bytes_sum, 'nan'::bytea AS unexpected_bytes_count FROM metrics, buckets GROUP BY 1,2 metrics: - histogram: usage: "HISTOGRAM" description: "Random data" - missing: usage: "HISTOGRAM" description: "nonfatal error" - missing_sum: usage: "HISTOGRAM" description: "nonfatal error" - missing_count: usage: "HISTOGRAM" description: "nonfatal error" - unexpected_sum: usage: "HISTOGRAM" description: "nonfatal error" - unexpected_count: usage: "HISTOGRAM" description: "nonfatal error" - unexpected_bytes: usage: "HISTOGRAM" description: "nonfatal error" prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/tests/username_file000066400000000000000000000000321417033021000277240ustar00rootroot00000000000000custom_username$&+,/:;=?@ prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/tests/userpass_file000066400000000000000000000000321417033021000277520ustar00rootroot00000000000000custom_password$&+,/:;=?@ prometheus-postgres-exporter-0.10.1/cmd/postgres_exporter/util.go000066400000000000000000000114421417033021000253340ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "fmt" "math" "net/url" "strconv" "strings" "time" "github.com/go-kit/log/level" "github.com/lib/pq" ) func contains(a []string, x string) bool { for _, n := range a { if x == n { return true } } return false } // convert a string to the corresponding ColumnUsage func stringToColumnUsage(s string) (ColumnUsage, error) { var u ColumnUsage var err error switch s { case "DISCARD": u = DISCARD case "LABEL": u = LABEL case "COUNTER": u = COUNTER case "GAUGE": u = GAUGE case "HISTOGRAM": u = HISTOGRAM case "MAPPEDMETRIC": u = MAPPEDMETRIC case "DURATION": u = DURATION default: err = fmt.Errorf("wrong ColumnUsage given : %s", s) } return u, err } // Convert database.sql types to float64s for Prometheus consumption. Null types are mapped to NaN. 
string and []byte // types are mapped as NaN and !ok func dbToFloat64(t interface{}) (float64, bool) { switch v := t.(type) { case int64: return float64(v), true case float64: return v, true case time.Time: return float64(v.Unix()), true case []byte: // Try and convert to string and then parse to a float64 strV := string(v) result, err := strconv.ParseFloat(strV, 64) if err != nil { level.Info(logger).Log("msg", "Could not parse []byte", "err", err) return math.NaN(), false } return result, true case string: result, err := strconv.ParseFloat(v, 64) if err != nil { level.Info(logger).Log("msg", "Could not parse string", "err", err) return math.NaN(), false } return result, true case bool: if v { return 1.0, true } return 0.0, true case nil: return math.NaN(), true default: return math.NaN(), false } } // Convert database.sql types to uint64 for Prometheus consumption. Null types are mapped to 0. string and []byte // types are mapped as 0 and !ok func dbToUint64(t interface{}) (uint64, bool) { switch v := t.(type) { case uint64: return v, true case int64: return uint64(v), true case float64: return uint64(v), true case time.Time: return uint64(v.Unix()), true case []byte: // Try and convert to string and then parse to a uint64 strV := string(v) result, err := strconv.ParseUint(strV, 10, 64) if err != nil { level.Info(logger).Log("msg", "Could not parse []byte", "err", err) return 0, false } return result, true case string: result, err := strconv.ParseUint(v, 10, 64) if err != nil { level.Info(logger).Log("msg", "Could not parse string", "err", err) return 0, false } return result, true case bool: if v { return 1, true } return 0, true case nil: return 0, true default: return 0, false } } // Convert database.sql to string for Prometheus labels. Null types are mapped to empty strings. func dbToString(t interface{}) (string, bool) { switch v := t.(type) { case int64: return fmt.Sprintf("%v", v), true case float64: return fmt.Sprintf("%v", v), true case time.Time: return fmt.Sprintf("%v", v.Unix()), true case nil: return "", true case []byte: // Try and convert to string return string(v), true case string: return v, true case bool: if v { return "true", true } return "false", true default: return "", false } } func parseFingerprint(url string) (string, error) { dsn, err := pq.ParseURL(url) if err != nil { dsn = url } pairs := strings.Split(dsn, " ") kv := make(map[string]string, len(pairs)) for _, pair := range pairs { splitted := strings.SplitN(pair, "=", 2) if len(splitted) != 2 { return "", fmt.Errorf("malformed dsn %q", dsn) } // Newer versions of pq.ParseURL quote values so trim them off if they exist key := strings.Trim(splitted[0], "'\"") value := strings.Trim(splitted[1], "'\"") kv[key] = value } var fingerprint string if host, ok := kv["host"]; ok { fingerprint += host } else { fingerprint += "localhost" } if port, ok := kv["port"]; ok { fingerprint += ":" + port } else { fingerprint += ":5432" } return fingerprint, nil } func loggableDSN(dsn string) string { pDSN, err := url.Parse(dsn) if err != nil { return "could not parse DATA_SOURCE_NAME" } // Blank user info if not nil if pDSN.User != nil { pDSN.User = url.UserPassword(pDSN.User.Username(), "PASSWORD_REMOVED") } return pDSN.String() } prometheus-postgres-exporter-0.10.1/gh-assets-clone.sh000077500000000000000000000010141417033021000230140ustar00rootroot00000000000000#!/bin/bash # Script to setup the assets clone of the repository using GIT_ASSETS_BRANCH and # GIT_API_KEY. [ ! 
-z "$GIT_ASSETS_BRANCH" ] || exit 1 setup_git() { git config --global user.email "travis@travis-ci.org" || exit 1 git config --global user.name "Travis CI" || exit 1 } # Constants ASSETS_DIR=".assets-branch" # Clone the assets branch with the correct credentials git clone --single-branch -b "$GIT_ASSETS_BRANCH" \ "https://${GIT_API_KEY}@github.com/${TRAVIS_REPO_SLUG}.git" "$ASSETS_DIR" || exit 1 prometheus-postgres-exporter-0.10.1/gh-metrics-push.sh000077500000000000000000000012711417033021000230440ustar00rootroot00000000000000#!/bin/bash # Script to copy and push new metric versions to the assets branch. [ ! -z "$GIT_ASSETS_BRANCH" ] || exit 1 [ ! -z "$GIT_API_KEY" ] || exit 1 version=$(git describe HEAD) || exit 1 # Constants ASSETS_DIR=".assets-branch" METRICS_DIR="$ASSETS_DIR/metriclists" # Ensure metrics dir exists mkdir -p "$METRICS_DIR/" # Remove old files so we spot deletions rm -f "$METRICS_DIR/.*.unique" # Copy new files cp -f -t "$METRICS_DIR/" ./.metrics.*.prom.unique || exit 1 # Enter the assets dir and push. cd "$ASSETS_DIR" || exit 1 git add "metriclists" || exit 1 git commit -m "Added unique metrics for build from $version" || exit 1 git push origin "$GIT_ASSETS_BRANCH" || exit 1 exit 0prometheus-postgres-exporter-0.10.1/go.mod000066400000000000000000000007301417033021000205730ustar00rootroot00000000000000module github.com/prometheus-community/postgres_exporter go 1.14 require ( github.com/blang/semver v3.5.1+incompatible github.com/go-kit/log v0.2.0 github.com/lib/pq v1.10.4 github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.32.1 github.com/prometheus/exporter-toolkit v0.7.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c gopkg.in/yaml.v2 v2.4.0 ) prometheus-postgres-exporter-0.10.1/go.sum000066400000000000000000001371331417033021000206300ustar00rootroot00000000000000cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= 
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 
v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 
h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/exporter-toolkit v0.7.1 h1:c6RXaK8xBVercEeUQ4tRNL8UGWzDHfvj9dseo1FcK1Y= github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod 
h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net 
v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools 
v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= 
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1 h1:7QnIQpGRHE5RnLKnESfDoxm2dTapTZua5a0kS0A+VXQ= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= prometheus-postgres-exporter-0.10.1/postgres-metrics-get-changes.sh000077500000000000000000000022021417033021000255150ustar00rootroot00000000000000#!/bin/bash # Script to parse a text exposition format file into a unique list of metrics # output by the exporter and then build lists of added/removed metrics. old_src="$1" if [ ! -d "$old_src" ] ; then mkdir -p "$old_src" fi function generate_add_removed() { type="$1" pg_version="$2" old_version="$3" new_version="$4" if [ ! -e "$old_version" ] ; then touch "$old_version" fi comm -23 "$old_version" "$new_version" > ".metrics.${type}.${pg_version}.removed" comm -13 "$old_version" "$new_version" > ".metrics.${type}.${pg_version}.added" } for raw_prom in $(echo .*.prom) ; do # Get the type and version type=$(echo "$raw_prom" | cut -d'.' -f3) pg_version=$(echo "$raw_prom" | cut -d'.' -f4- | sed 's/\.prom$//g') unique_file="${raw_prom}.unique" old_unique_file="$old_src/$unique_file" # Strip, sort and deduplicate the label names grep -v '#' "$raw_prom" | \ rev | cut -d' ' -f2- | \ rev | cut -d'{' -f1 | \ sort | \ uniq > "$unique_file" generate_add_removed "$type" "$pg_version" "$old_unique_file" "$unique_file" done prometheus-postgres-exporter-0.10.1/postgres_exporter.rc000066400000000000000000000061741417033021000236210ustar00rootroot00000000000000#!/bin/sh # PROVIDE: postgres_exporter # REQUIRE: LOGIN # KEYWORD: shutdown # # rc-script for postgres_exporter # # # Add the following lines to /etc/rc.conf.local or /etc/rc.conf # to enable this service: # # postgres_exporter_enable (bool): Set to NO by default. # Set it to YES to enable postgres_exporter. # postgres_exporter_user (string): Set user that postgres_exporter will run under # Default is "nobody". # postgres_exporter_group (string): Set group that postgres_exporter will run under # Default is "nobody". # postgres_exporter_args (string): Set extra arguments to pass to postgres_exporter # Default is "". # postgres_exporter_listen_address (string):Set ip:port to listen on for web interface and telemetry. 
# Defaults to ":9187" # postgres_exporter_pg_user (string): Set the Postgres database user # Defaults to "postgres_exporter" # postgres_exporter_pg_pass (string): Set the Postgres datase password # Default is empty # postgres_exporter_pg_host (string): Set the Postgres database server # Defaults to "localhost" # postgres_exporter_pg_port (string): Set the Postgres database port # Defaults to "5432" # Add extra arguments via "postgres_exporter_args" which could be choosen from: # (see $ postgres_exporter --help) # # -dumpmaps # Do not run, simply dump the maps. # -extend.query-path string # Path to custom queries to run. # -log.level value # Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]. # -version # print version and exit # -web.telemetry-path string # Path under which to expose metrics. (default "/metrics") # -log.format value # If set use a syslog logger or JSON logging. Example: logger:syslog?appname=bob&local=7 or logger:stdout?json=true. Defaults to stderr. # -extend.query-path string # Path to custom queries to run. . /etc/rc.subr name=postgres_exporter rcvar=postgres_exporter_enable load_rc_config $name : ${postgres_exporter_enable:="NO"} : ${postgres_exporter_user:="nobody"} : ${postgres_exporter_group:="nobody"} : ${postgres_exporter_args:=""} : ${postgres_exporter_listen_address:=":9187"} : ${postgres_exporter_pg_user:="postgres_exporter"} : ${postgres_exporter_pg_pass:=""} : ${postgres_exporter_pg_host:="localhost"} : ${postgres_exporter_pg_port:="5432"} postgres_exporter_data_source_name="postgresql://${postgres_exporter_pg_user}:${postgres_exporter_pg_pass}@${postgres_exporter_pg_host}:${postgres_exporter_pg_port}/postgres?sslmode=disable" pidfile=/var/run/postgres_exporter.pid command="/usr/sbin/daemon" procname="/usr/local/bin/postgres_exporter" command_args="-p ${pidfile} /usr/bin/env DATA_SOURCE_NAME="${postgres_exporter_data_source_name}" ${procname} \ -web.listen-address=${postgres_exporter_listen_address} \ ${postgres_exporter_args}" start_precmd=postgres_exporter_startprecmd postgres_exporter_startprecmd() { if [ ! -e ${pidfile} ]; then install -o ${postgres_exporter_user} -g ${postgres_exporter_group} /dev/null ${pidfile}; fi } load_rc_config $name run_rc_command "$1" prometheus-postgres-exporter-0.10.1/postgres_exporter_integration_test_script000077500000000000000000000006001417033021000302330ustar00rootroot00000000000000#!/bin/bash # This script wraps the integration test binary so it produces concatenated # test output. test_binary=$1 shift output_cov=$1 shift echo "Test Binary: $test_binary" 1>&2 echo "Coverage File: $output_cov" 1>&2 echo "mode: count" > $output_cov test_cov=$(mktemp) $test_binary -test.coverprofile=$test_cov $@ || exit 1 tail -n +2 $test_cov >> $output_cov rm -f $test_cov prometheus-postgres-exporter-0.10.1/postgres_mixin/000077500000000000000000000000001417033021000225375ustar00rootroot00000000000000prometheus-postgres-exporter-0.10.1/postgres_mixin/.gitignore000066400000000000000000000000501417033021000245220ustar00rootroot00000000000000/alerts.yaml /rules.yaml dashboards_out prometheus-postgres-exporter-0.10.1/postgres_mixin/Makefile000066400000000000000000000010641417033021000242000ustar00rootroot00000000000000JSONNET_FMT := jsonnetfmt -n 2 --max-blank-lines 2 --string-style s --comment-style s default: build all: fmt lint build clean fmt: find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \ xargs -n 1 -- $(JSONNET_FMT) -i lint: find . 
-name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \ while read f; do \ $(JSONNET_FMT) "$$f" | diff -u "$$f" -; \ done mixtool lint mixin.libsonnet build: mixtool generate all mixin.libsonnet clean: rm -rf dashboards_out alerts.yaml rules.yaml prometheus-postgres-exporter-0.10.1/postgres_mixin/README.md000066400000000000000000000016741417033021000240260ustar00rootroot00000000000000# Postgres Mixin _This is a work in progress. We aim for it to become a good role model for alerts and dashboards eventually, but it is not quite there yet._ The Postgres Mixin is a set of configurable, reusable, and extensible alerts and dashboards based on the metrics exported by the Postgres Exporter. The mixin creates recording and alerting rules for Prometheus and suitable dashboard descriptions for Grafana. To use them, you need to have `mixtool` and `jsonnetfmt` installed. If you have a working Go development environment, it's easiest to run the following: ```bash $ go get github.com/monitoring-mixins/mixtool/cmd/mixtool $ go get github.com/google/go-jsonnet/cmd/jsonnetfmt ``` You can then build the Prometheus rules files `alerts.yaml` and `rules.yaml` and a directory `dashboard_out` with the JSON dashboard files for Grafana: ```bash $ make build ``` For more advanced uses of mixins, see https://github.com/monitoring-mixins/docs. prometheus-postgres-exporter-0.10.1/postgres_mixin/alerts/000077500000000000000000000000001417033021000240315ustar00rootroot00000000000000prometheus-postgres-exporter-0.10.1/postgres_mixin/alerts/alerts.yaml000066400000000000000000000066161417033021000262200ustar00rootroot00000000000000--- groups: - name: PostgreSQL rules: - alert: PostgreSQLMaxConnectionsReached expr: sum(pg_stat_activity_count) by (instance) >= sum(pg_settings_max_connections) by (instance) - sum(pg_settings_superuser_reserved_connections) by (instance) for: 1m labels: severity: email annotations: summary: "{{ $labels.instance }} has maxed out Postgres connections." description: "{{ $labels.instance }} is exceeding the currently configured maximum Postgres connection limit (current value: {{ $value }}s). Services may be degraded - please take immediate action (you probably need to increase max_connections in the Docker image and re-deploy." - alert: PostgreSQLHighConnections expr: sum(pg_stat_activity_count) by (instance) > (sum(pg_settings_max_connections) by (instance) - sum(pg_settings_superuser_reserved_connections) by (instance)) * 0.8 for: 10m labels: severity: email annotations: summary: "{{ $labels.instance }} is over 80% of max Postgres connections." description: "{{ $labels.instance }} is exceeding 80% of the currently configured maximum Postgres connection limit (current value: {{ $value }}s). Please check utilization graphs and confirm if this is normal service growth, abuse or an otherwise temporary condition or if new resources need to be provisioned (or the limits increased, which is mostly likely)." - alert: PostgreSQLDown expr: pg_up != 1 for: 1m labels: severity: email annotations: summary: "PostgreSQL is not processing queries: {{ $labels.instance }}" description: "{{ $labels.instance }} is rejecting query requests from the exporter, and thus probably not allowing DNS requests to work either. User services should not be effected provided at least 1 node is still alive." 
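  # The two connection alerts above share the same arithmetic: usable capacity
  # is max_connections minus superuser_reserved_connections. The following is a
  # minimal PromQL sketch (a comment here, not an alert rule) for eyeballing
  # how much connection headroom an instance has left before those alerts fire;
  # it assumes only the metrics already referenced in this file:
  #
  #   sum(pg_settings_max_connections) by (instance)
  #     - sum(pg_settings_superuser_reserved_connections) by (instance)
  #     - sum(pg_stat_activity_count) by (instance)
  #
  # Values approaching zero mean the instance is about to hit max_connections.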
  - alert: PostgreSQLSlowQueries
    expr: avg(rate(pg_stat_activity_max_tx_duration{datname!~"template.*"}[2m])) by (datname) > 2 * 60
    for: 2m
    labels:
      severity: email
    annotations:
      summary: "PostgreSQL high number of slow queries on {{ $labels.cluster }} for database {{ $labels.datname }}"
      description: "PostgreSQL high number of slow queries on {{ $labels.cluster }} for database {{ $labels.datname }} with a value of {{ $value }}"
  - alert: PostgreSQLQPS
    expr: avg(irate(pg_stat_database_xact_commit{datname!~"template.*"}[5m]) + irate(pg_stat_database_xact_rollback{datname!~"template.*"}[5m])) by (datname) > 10000
    for: 5m
    labels:
      severity: email
    annotations:
      summary: "PostgreSQL high number of queries per second on {{ $labels.cluster }} for database {{ $labels.datname }}"
      description: "PostgreSQL high number of queries per second on {{ $labels.cluster }} for database {{ $labels.datname }} with a value of {{ $value }}"
  - alert: PostgreSQLCacheHitRatio
    expr: avg(rate(pg_stat_database_blks_hit{datname!~"template.*"}[5m]) / (rate(pg_stat_database_blks_hit{datname!~"template.*"}[5m]) + rate(pg_stat_database_blks_read{datname!~"template.*"}[5m]))) by (datname) < 0.98
    for: 5m
    labels:
      severity: email
    annotations:
      summary: "PostgreSQL low cache hit rate on {{ $labels.cluster }} for database {{ $labels.datname }}"
      description: "PostgreSQL low cache hit rate on {{ $labels.cluster }} for database {{ $labels.datname }} with a value of {{ $value }}"
prometheus-postgres-exporter-0.10.1/postgres_mixin/dashboards/000077500000000000000000000000001417033021000246515ustar00rootroot00000000000000prometheus-postgres-exporter-0.10.1/postgres_mixin/dashboards/postgres-overview.json000066400000000000000000001020211417033021000312520ustar00rootroot00000000000000{ "annotations": { "list": [ { "builtIn": 1, "datasource": "-- Grafana --", "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", "type": "dashboard" } ] }, "description": "Performance metrics for Postgres", "editable": true, "gnetId": 455, "graphTooltip": 0, "id": 1, "iteration": 1603191461722, "links": [], "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Postgres Overview", "editable": true, "error": false, "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 20, "x": 0, "y": 0 }, "hiddenSeries": false, "id": 1, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": false, "max": true, "min": true, "rightSide": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "connected", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.2.1", "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "alias": "fetched", "dsType": "prometheus", "expr": "sum(irate(pg_stat_database_tup_fetched{datname=~\"$db\",instance=~\"$instance\"}[5m]))", "format": "time_series", "groupBy": [ { "params": [ "$interval" ], "type": "time" }, { "params": [ "null" ], "type": "fill" } ], "intervalFactor": 2, "legendFormat": "fetched", "measurement": "postgresql", "policy": "default", "refId": "A", "resultFormat": "time_series", "select": [ [ { "params": [ "tup_fetched" ], "type": "field" }, { "params": [], "type": "mean" }, { "params": [ "10s" ], "type": "non_negative_derivative" } ] ], "step": 120, "tags": [ { "key": "instance",
"operator": "=~", "value": "/^$instance$/" } ] }, { "alias": "fetched", "dsType": "prometheus", "expr": "sum(irate(pg_stat_database_tup_returned{datname=~\"$db\",instance=~\"$instance\"}[5m]))", "format": "time_series", "groupBy": [ { "params": [ "$interval" ], "type": "time" }, { "params": [ "null" ], "type": "fill" } ], "intervalFactor": 2, "legendFormat": "returned", "measurement": "postgresql", "policy": "default", "refId": "B", "resultFormat": "time_series", "select": [ [ { "params": [ "tup_fetched" ], "type": "field" }, { "params": [], "type": "mean" }, { "params": [ "10s" ], "type": "non_negative_derivative" } ] ], "step": 120, "tags": [ { "key": "instance", "operator": "=~", "value": "/^$instance$/" } ] }, { "alias": "fetched", "dsType": "prometheus", "expr": "sum(irate(pg_stat_database_tup_inserted{datname=~\"$db\",instance=~\"$instance\"}[5m]))", "format": "time_series", "groupBy": [ { "params": [ "$interval" ], "type": "time" }, { "params": [ "null" ], "type": "fill" } ], "intervalFactor": 2, "legendFormat": "inserted", "measurement": "postgresql", "policy": "default", "refId": "C", "resultFormat": "time_series", "select": [ [ { "params": [ "tup_fetched" ], "type": "field" }, { "params": [], "type": "mean" }, { "params": [ "10s" ], "type": "non_negative_derivative" } ] ], "step": 120, "tags": [ { "key": "instance", "operator": "=~", "value": "/^$instance$/" } ] }, { "alias": "fetched", "dsType": "prometheus", "expr": "sum(irate(pg_stat_database_tup_updated{datname=~\"$db\",instance=~\"$instance\"}[5m]))", "format": "time_series", "groupBy": [ { "params": [ "$interval" ], "type": "time" }, { "params": [ "null" ], "type": "fill" } ], "intervalFactor": 2, "legendFormat": "updated", "measurement": "postgresql", "policy": "default", "refId": "D", "resultFormat": "time_series", "select": [ [ { "params": [ "tup_fetched" ], "type": "field" }, { "params": [], "type": "mean" }, { "params": [ "10s" ], "type": "non_negative_derivative" } ] ], "step": 120, "tags": [ { "key": "instance", "operator": "=~", "value": "/^$instance$/" } ] }, { "alias": "fetched", "dsType": "prometheus", "expr": "sum(irate(pg_stat_database_tup_deleted{datname=~\"$db\",instance=~\"$instance\"}[5m]))", "format": "time_series", "groupBy": [ { "params": [ "$interval" ], "type": "time" }, { "params": [ "null" ], "type": "fill" } ], "intervalFactor": 2, "legendFormat": "deleted", "measurement": "postgresql", "policy": "default", "refId": "E", "resultFormat": "time_series", "select": [ [ { "params": [ "tup_fetched" ], "type": "field" }, { "params": [], "type": "mean" }, { "params": [ "10s" ], "type": "non_negative_derivative" } ] ], "step": 120, "tags": [ { "key": "instance", "operator": "=~", "value": "/^$instance$/" } ] } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Rows", "tooltip": { "msResolution": true, "shared": true, "sort": 0, "value_type": "cumulative" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "cacheTimeout": null, "colorBackground": false, "colorValue": false, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "datasource": "Postgres Overview", "decimals": 0, "editable": true, "error": false, "fieldConfig": { 
"defaults": { "custom": {} }, "overrides": [] }, "format": "none", "gauge": { "maxValue": 100, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": true }, "gridPos": { "h": 3, "w": 4, "x": 20, "y": 0 }, "height": "55px", "id": 11, "interval": null, "isNew": true, "links": [], "mappingType": 1, "mappingTypes": [ { "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 } ], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "50%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [ { "from": "null", "text": "N/A", "to": "null" } ], "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": true, "lineColor": "rgb(31, 120, 193)", "show": true }, "tableColumn": "", "targets": [ { "dsType": "prometheus", "expr": "sum(irate(pg_stat_database_xact_commit{datname=~\"$db\",instance=~\"$instance\"}[5m])) + sum(irate(pg_stat_database_xact_rollback{datname=~\"$db\",instance=~\"$instance\"}[5m]))", "format": "time_series", "groupBy": [ { "params": [ "$interval" ], "type": "time" }, { "params": [ "null" ], "type": "fill" } ], "intervalFactor": 2, "measurement": "postgresql", "policy": "default", "refId": "A", "resultFormat": "time_series", "select": [ [ { "params": [ "xact_commit" ], "type": "field" }, { "params": [], "type": "mean" }, { "params": [ "10s" ], "type": "non_negative_derivative" } ] ], "step": 1800, "tags": [ { "key": "instance", "operator": "=~", "value": "/^$instance$/" } ] } ], "thresholds": "", "title": "QPS", "transparent": true, "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ { "op": "=", "text": "N/A", "value": "null" } ], "valueName": "avg" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Postgres Overview", "decimals": 1, "editable": true, "error": false, "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 0, "y": 7 }, "hiddenSeries": false, "id": 2, "isNew": true, "legend": { "alignAsTable": true, "avg": true, "current": false, "hideZero": true, "max": true, "min": true, "rightSide": false, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "connected", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.2.1", "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "alias": "Buffers Allocated", "dsType": "prometheus", "expr": "irate(pg_stat_bgwriter_buffers_alloc{instance='$instance'}[5m])", "format": "time_series", "groupBy": [ { "params": [ "$interval" ], "type": "time" }, { "params": [ "null" ], "type": "fill" } ], "intervalFactor": 2, "legendFormat": "buffers_alloc", "measurement": "postgresql", "policy": "default", "refId": "A", "resultFormat": "time_series", "select": [ [ { "params": [ "buffers_alloc" ], "type": "field" }, { "params": [], "type": "mean" }, { "params": [], "type": "difference" } ] ], "step": 240, "tags": [ { "key": "instance", "operator": "=~", "value": "/^$instance$/" } ] }, { "alias": "Buffers Allocated", "dsType": "prometheus", "expr": "irate(pg_stat_bgwriter_buffers_backend_fsync{instance='$instance'}[5m])", "format": "time_series", "groupBy": [ { "params": [ "$interval" ], "type": "time" }, { "params": [ "null" ], "type": "fill" } ], "intervalFactor": 2, "legendFormat": "buffers_backend_fsync", "measurement": "postgresql", "policy": 
"default", "refId": "B", "resultFormat": "time_series", "select": [ [ { "params": [ "buffers_alloc" ], "type": "field" }, { "params": [], "type": "mean" }, { "params": [], "type": "difference" } ] ], "step": 240, "tags": [ { "key": "instance", "operator": "=~", "value": "/^$instance$/" } ] }, { "alias": "Buffers Allocated", "dsType": "prometheus", "expr": "irate(pg_stat_bgwriter_buffers_backend{instance='$instance'}[5m])", "format": "time_series", "groupBy": [ { "params": [ "$interval" ], "type": "time" }, { "params": [ "null" ], "type": "fill" } ], "intervalFactor": 2, "legendFormat": "buffers_backend", "measurement": "postgresql", "policy": "default", "refId": "C", "resultFormat": "time_series", "select": [ [ { "params": [ "buffers_alloc" ], "type": "field" }, { "params": [], "type": "mean" }, { "params": [], "type": "difference" } ] ], "step": 240, "tags": [ { "key": "instance", "operator": "=~", "value": "/^$instance$/" } ] }, { "alias": "Buffers Allocated", "dsType": "prometheus", "expr": "irate(pg_stat_bgwriter_buffers_clean{instance='$instance'}[5m])", "format": "time_series", "groupBy": [ { "params": [ "$interval" ], "type": "time" }, { "params": [ "null" ], "type": "fill" } ], "intervalFactor": 2, "legendFormat": "buffers_clean", "measurement": "postgresql", "policy": "default", "refId": "D", "resultFormat": "time_series", "select": [ [ { "params": [ "buffers_alloc" ], "type": "field" }, { "params": [], "type": "mean" }, { "params": [], "type": "difference" } ] ], "step": 240, "tags": [ { "key": "instance", "operator": "=~", "value": "/^$instance$/" } ] }, { "alias": "Buffers Allocated", "dsType": "prometheus", "expr": "irate(pg_stat_bgwriter_buffers_checkpoint{instance='$instance'}[5m])", "format": "time_series", "groupBy": [ { "params": [ "$interval" ], "type": "time" }, { "params": [ "null" ], "type": "fill" } ], "intervalFactor": 2, "legendFormat": "buffers_checkpoint", "measurement": "postgresql", "policy": "default", "refId": "E", "resultFormat": "time_series", "select": [ [ { "params": [ "buffers_alloc" ], "type": "field" }, { "params": [], "type": "mean" }, { "params": [], "type": "difference" } ] ], "step": 240, "tags": [ { "key": "instance", "operator": "=~", "value": "/^$instance$/" } ] } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Buffers", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "cumulative" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Postgres Overview", "editable": true, "error": false, "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 12, "y": 7 }, "hiddenSeries": false, "id": 3, "isNew": true, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.2.1", "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, 
"targets": [ { "alias": "conflicts", "dsType": "prometheus", "expr": "sum(rate(pg_stat_database_deadlocks{datname=~\"$db\",instance=~\"$instance\"}[5m]))", "format": "time_series", "groupBy": [ { "params": [ "$interval" ], "type": "time" }, { "params": [ "null" ], "type": "fill" } ], "intervalFactor": 2, "legendFormat": "deadlocks", "measurement": "postgresql", "policy": "default", "refId": "A", "resultFormat": "time_series", "select": [ [ { "params": [ "conflicts" ], "type": "field" }, { "params": [], "type": "mean" }, { "params": [], "type": "difference" } ] ], "step": 240, "tags": [ { "key": "instance", "operator": "=~", "value": "/^$instance$/" } ] }, { "alias": "deadlocks", "dsType": "prometheus", "expr": "sum(rate(pg_stat_database_conflicts{datname=~\"$db\",instance=~\"$instance\"}[5m]))", "format": "time_series", "groupBy": [ { "params": [ "$interval" ], "type": "time" }, { "params": [ "null" ], "type": "fill" } ], "intervalFactor": 2, "legendFormat": "conflicts", "measurement": "postgresql", "policy": "default", "refId": "B", "resultFormat": "time_series", "select": [ [ { "params": [ "deadlocks" ], "type": "field" }, { "params": [], "type": "mean" }, { "params": [], "type": "difference" } ] ], "step": 240, "tags": [ { "key": "instance", "operator": "=~", "value": "/^$instance$/" } ] } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Conflicts/Deadlocks", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "cumulative" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Postgres Overview", "editable": true, "error": false, "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 0, "y": 14 }, "hiddenSeries": false, "id": 12, "isNew": true, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "options": { "alertThreshold": true }, "percentage": true, "pluginVersion": "7.2.1", "pointradius": 1, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "sum(pg_stat_database_blks_hit{datname=~\"$db\",instance=~\"$instance\"}) / (sum(pg_stat_database_blks_hit{datname=~\"$db\",instance=~\"$instance\"}) + sum(pg_stat_database_blks_read{datname=~\"$db\",instance=~\"$instance\"}))", "format": "time_series", "intervalFactor": 2, "legendFormat": "cache hit rate", "refId": "A", "step": 240 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Cache hit ratio", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "cumulative" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "percentunit", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { 
"aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Postgres Overview", "editable": true, "error": false, "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 12, "y": 14 }, "hiddenSeries": false, "id": 13, "isNew": true, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.2.1", "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "pg_stat_database_numbackends{datname=~\"$db\",instance=~\"$instance\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{__name__}}", "refId": "A", "step": 240 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Number of active connections", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "cumulative" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } } ], "refresh": false, "schemaVersion": 26, "style": "dark", "tags": [ "postgres" ], "templating": { "list": [ { "allValue": ".*", "current": { "selected": false, "text": "All", "value": "$__all" }, "datasource": "Postgres Overview", "definition": "", "hide": 0, "includeAll": true, "label": null, "multi": false, "name": "instance", "options": [], "query": "label_values(up{job=~\"postgres.*\"},instance)", "refresh": 1, "regex": "", "skipUrlSync": false, "sort": 0, "tagValuesQuery": "", "tags": [], "tagsQuery": "", "type": "query", "useTags": false }, { "allValue": ".*", "current": { "selected": false, "text": "All", "value": "$__all" }, "datasource": "Postgres Overview", "definition": "label_values(pg_stat_database_tup_fetched{instance=~\"$instance\",datname!~\"template.*|postgres\"},datname)", "hide": 0, "includeAll": true, "label": "db", "multi": false, "name": "db", "options": [], "query": "label_values(pg_stat_database_tup_fetched{instance=~\"$instance\",datname!~\"template.*|postgres\"},datname)", "refresh": 1, "regex": "", "skipUrlSync": false, "sort": 0, "tagValuesQuery": "", "tags": [], "tagsQuery": "", "type": "query", "useTags": false }, { "current": { "selected": false, "text": "Postgres Overview", "value": "Postgres Overview" }, "hide": 0, "includeAll": false, "label": "datasource", "multi": false, "name": "datasource", "options": [], "query": "prometheus", "refresh": 1, "regex": "", "skipUrlSync": false, "type": "datasource" }, { "allValue": null, "current": { "selected": true, "text": "postgres", "value": "postgres" }, "datasource": "$datasource", "definition": "label_values(pg_up, job)", "hide": 0, "includeAll": false, "label": "job", "multi": false, "name": "job", "options": [ { "selected": true, "text": "postgres", "value": "postgres" } ], "query": "label_values(pg_up, job)", "refresh": 0, "regex": "", "skipUrlSync": false, "sort": 0, "tagValuesQuery": "", "tags": [], "tagsQuery": "", "type": "query", "useTags": false } ] }, "time": { "from": "now-1h", "to": "now" }, 
"timepicker": { "refresh_intervals": [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "time_options": [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, "timezone": "browser", "title": "Postgres Overview", "uid": "wGgaPlciz", "version": 5 } prometheus-postgres-exporter-0.10.1/postgres_mixin/go.mod000066400000000000000000000001061417033021000236420ustar00rootroot00000000000000module github.com/wrouesnel/postgres_exporter/postgres_mixin go 1.15 prometheus-postgres-exporter-0.10.1/postgres_mixin/mixin.libsonnet000066400000000000000000000006411417033021000256030ustar00rootroot00000000000000{ grafanaDashboards: { 'postgres-overview.json': (import 'dashboards/postgres-overview.json'), }, // Helper function to ensure that we don't override other rules, by forcing // the patching of the groups list, and not the overall rules object. local importRules(rules) = { groups+: std.native('parseYaml')(rules)[0].groups, }, prometheusAlerts+: importRules(importstr 'alerts/alerts.yaml'), } prometheus-postgres-exporter-0.10.1/queries.yaml000066400000000000000000000241761417033021000220400ustar00rootroot00000000000000pg_replication: query: "SELECT CASE WHEN NOT pg_is_in_recovery() THEN 0 ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) END AS lag" master: true metrics: - lag: usage: "GAUGE" description: "Replication lag behind master in seconds" pg_postmaster: query: "SELECT pg_postmaster_start_time as start_time_seconds from pg_postmaster_start_time()" master: true metrics: - start_time_seconds: usage: "GAUGE" description: "Time at which postmaster started" pg_stat_user_tables: query: | SELECT current_database() datname, schemaname, relname, seq_scan, seq_tup_read, idx_scan, idx_tup_fetch, n_tup_ins, n_tup_upd, n_tup_del, n_tup_hot_upd, n_live_tup, n_dead_tup, n_mod_since_analyze, COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum, COALESCE(last_autovacuum, '1970-01-01Z') as last_autovacuum, COALESCE(last_analyze, '1970-01-01Z') as last_analyze, COALESCE(last_autoanalyze, '1970-01-01Z') as last_autoanalyze, vacuum_count, autovacuum_count, analyze_count, autoanalyze_count FROM pg_stat_user_tables metrics: - datname: usage: "LABEL" description: "Name of current database" - schemaname: usage: "LABEL" description: "Name of the schema that this table is in" - relname: usage: "LABEL" description: "Name of this table" - seq_scan: usage: "COUNTER" description: "Number of sequential scans initiated on this table" - seq_tup_read: usage: "COUNTER" description: "Number of live rows fetched by sequential scans" - idx_scan: usage: "COUNTER" description: "Number of index scans initiated on this table" - idx_tup_fetch: usage: "COUNTER" description: "Number of live rows fetched by index scans" - n_tup_ins: usage: "COUNTER" description: "Number of rows inserted" - n_tup_upd: usage: "COUNTER" description: "Number of rows updated" - n_tup_del: usage: "COUNTER" description: "Number of rows deleted" - n_tup_hot_upd: usage: "COUNTER" description: "Number of rows HOT updated (i.e., with no separate index update required)" - n_live_tup: usage: "GAUGE" description: "Estimated number of live rows" - n_dead_tup: usage: "GAUGE" description: "Estimated number of dead rows" - n_mod_since_analyze: usage: "GAUGE" description: "Estimated number of rows changed since last analyze" - last_vacuum: usage: "GAUGE" description: "Last time at which this table was manually vacuumed (not counting VACUUM FULL)" - last_autovacuum: usage: "GAUGE" description: "Last time at which this table 
was vacuumed by the autovacuum daemon" - last_analyze: usage: "GAUGE" description: "Last time at which this table was manually analyzed" - last_autoanalyze: usage: "GAUGE" description: "Last time at which this table was analyzed by the autovacuum daemon" - vacuum_count: usage: "COUNTER" description: "Number of times this table has been manually vacuumed (not counting VACUUM FULL)" - autovacuum_count: usage: "COUNTER" description: "Number of times this table has been vacuumed by the autovacuum daemon" - analyze_count: usage: "COUNTER" description: "Number of times this table has been manually analyzed" - autoanalyze_count: usage: "COUNTER" description: "Number of times this table has been analyzed by the autovacuum daemon" pg_statio_user_tables: query: "SELECT current_database() datname, schemaname, relname, heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit, toast_blks_read, toast_blks_hit, tidx_blks_read, tidx_blks_hit FROM pg_statio_user_tables" metrics: - datname: usage: "LABEL" description: "Name of current database" - schemaname: usage: "LABEL" description: "Name of the schema that this table is in" - relname: usage: "LABEL" description: "Name of this table" - heap_blks_read: usage: "COUNTER" description: "Number of disk blocks read from this table" - heap_blks_hit: usage: "COUNTER" description: "Number of buffer hits in this table" - idx_blks_read: usage: "COUNTER" description: "Number of disk blocks read from all indexes on this table" - idx_blks_hit: usage: "COUNTER" description: "Number of buffer hits in all indexes on this table" - toast_blks_read: usage: "COUNTER" description: "Number of disk blocks read from this table's TOAST table (if any)" - toast_blks_hit: usage: "COUNTER" description: "Number of buffer hits in this table's TOAST table (if any)" - tidx_blks_read: usage: "COUNTER" description: "Number of disk blocks read from this table's TOAST table indexes (if any)" - tidx_blks_hit: usage: "COUNTER" description: "Number of buffer hits in this table's TOAST table indexes (if any)" pg_database: query: "SELECT pg_database.datname, pg_database_size(pg_database.datname) as size_bytes FROM pg_database" master: true cache_seconds: 30 metrics: - datname: usage: "LABEL" description: "Name of the database" - size_bytes: usage: "GAUGE" description: "Disk space used by the database" pg_stat_statements: query: "SELECT t2.rolname, t3.datname, queryid, calls, total_time / 1000 as total_time_seconds, min_time / 1000 as min_time_seconds, max_time / 1000 as max_time_seconds, mean_time / 1000 as mean_time_seconds, stddev_time / 1000 as stddev_time_seconds, rows, shared_blks_hit, shared_blks_read, shared_blks_dirtied, shared_blks_written, local_blks_hit, local_blks_read, local_blks_dirtied, local_blks_written, temp_blks_read, temp_blks_written, blk_read_time / 1000 as blk_read_time_seconds, blk_write_time / 1000 as blk_write_time_seconds FROM pg_stat_statements t1 JOIN pg_roles t2 ON (t1.userid=t2.oid) JOIN pg_database t3 ON (t1.dbid=t3.oid) WHERE t2.rolname != 'rdsadmin'" master: true metrics: - rolname: usage: "LABEL" description: "Name of user" - datname: usage: "LABEL" description: "Name of database" - queryid: usage: "LABEL" description: "Query ID" - calls: usage: "COUNTER" description: "Number of times executed" - total_time_seconds: usage: "COUNTER" description: "Total time spent in the statement, in milliseconds" - min_time_seconds: usage: "GAUGE" description: "Minimum time spent in the statement, in milliseconds" - max_time_seconds: usage: "GAUGE" description: "Maximum time 
spent in the statement, in milliseconds" - mean_time_seconds: usage: "GAUGE" description: "Mean time spent in the statement, in milliseconds" - stddev_time_seconds: usage: "GAUGE" description: "Population standard deviation of time spent in the statement, in milliseconds" - rows: usage: "COUNTER" description: "Total number of rows retrieved or affected by the statement" - shared_blks_hit: usage: "COUNTER" description: "Total number of shared block cache hits by the statement" - shared_blks_read: usage: "COUNTER" description: "Total number of shared blocks read by the statement" - shared_blks_dirtied: usage: "COUNTER" description: "Total number of shared blocks dirtied by the statement" - shared_blks_written: usage: "COUNTER" description: "Total number of shared blocks written by the statement" - local_blks_hit: usage: "COUNTER" description: "Total number of local block cache hits by the statement" - local_blks_read: usage: "COUNTER" description: "Total number of local blocks read by the statement" - local_blks_dirtied: usage: "COUNTER" description: "Total number of local blocks dirtied by the statement" - local_blks_written: usage: "COUNTER" description: "Total number of local blocks written by the statement" - temp_blks_read: usage: "COUNTER" description: "Total number of temp blocks read by the statement" - temp_blks_written: usage: "COUNTER" description: "Total number of temp blocks written by the statement" - blk_read_time_seconds: usage: "COUNTER" description: "Total time the statement spent reading blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)" - blk_write_time_seconds: usage: "COUNTER" description: "Total time the statement spent writing blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)" pg_process_idle: query: | WITH metrics AS ( SELECT application_name, SUM(EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change))::bigint)::float AS process_idle_seconds_sum, COUNT(*) AS process_idle_seconds_count FROM pg_stat_activity WHERE state = 'idle' GROUP BY application_name ), buckets AS ( SELECT application_name, le, SUM( CASE WHEN EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change)) <= le THEN 1 ELSE 0 END )::bigint AS bucket FROM pg_stat_activity, UNNEST(ARRAY[1, 2, 5, 15, 30, 60, 90, 120, 300]) AS le GROUP BY application_name, le ORDER BY application_name, le ) SELECT application_name, process_idle_seconds_sum as seconds_sum, process_idle_seconds_count as seconds_count, ARRAY_AGG(le) AS seconds, ARRAY_AGG(bucket) AS seconds_bucket FROM metrics JOIN buckets USING (application_name) GROUP BY 1, 2, 3 metrics: - application_name: usage: "LABEL" description: "Application Name" - seconds: usage: "HISTOGRAM" description: "Idle time of server processes" prometheus-postgres-exporter-0.10.1/scripts/000077500000000000000000000000001417033021000211545ustar00rootroot00000000000000prometheus-postgres-exporter-0.10.1/scripts/errcheck_excludes.txt000066400000000000000000000001051417033021000253730ustar00rootroot00000000000000// Never check for logger errors. (github.com/go-kit/log.Logger).Log
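Usage note: a minimal sketch of how the custom queries in queries.yaml above surface in Prometheus, assuming the exporter is started with --extend.query-path=queries.yaml and scraped on its default port 9187. The job name matches the dashboard's "job" template default; the target address and sample value below are illustrative assumptions, not taken from this repository.

# prometheus.yml fragment (sketch; the target address is an assumption)
scrape_configs:
  - job_name: 'postgres'
    static_configs:
      - targets: ['localhost:9187']
# Each queries.yaml entry is exposed as <namespace>_<column>; for example the
# pg_database entry above yields a series like (value illustrative):
#   pg_database_size_bytes{datname="postgres"} 8758051
# Its cache_seconds: 30 setting means the underlying SQL runs at most once
# every 30 seconds, with cached results served to more frequent scrapes.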