pax_global_header00006660000000000000000000000064133611651170014516gustar00rootroot0000000000000052 comment=4c8c39e9c3a8e40aca8ddd38cab2a453ff84310f prometheus-mysqld-exporter-0.11.0+ds/000077500000000000000000000000001336116511700175475ustar00rootroot00000000000000prometheus-mysqld-exporter-0.11.0+ds/.circleci/000077500000000000000000000000001336116511700214025ustar00rootroot00000000000000prometheus-mysqld-exporter-0.11.0+ds/.circleci/config.yml000066400000000000000000000070241336116511700233750ustar00rootroot00000000000000--- version: 2 jobs: test: docker: - image: circleci/golang:1.10 working_directory: /go/src/github.com/prometheus/mysqld_exporter steps: - checkout - run: make promu - run: make - run: rm -v mysqld_exporter codespell: docker: - image: circleci/python steps: - checkout - run: sudo pip install codespell - run: codespell --skip=".git,./vendor,ttar" build: machine: true working_directory: /home/circleci/.go_workspace/src/github.com/prometheus/mysqld_exporter steps: - checkout - run: make promu - run: promu crossbuild -v - persist_to_workspace: root: . paths: - .build docker_hub_master: docker: - image: circleci/golang:1.10 working_directory: /go/src/github.com/prometheus/mysqld_exporter environment: DOCKER_IMAGE_NAME: prom/mysqld-exporter QUAY_IMAGE_NAME: quay.io/prometheus/mysqld-exporter steps: - checkout - setup_remote_docker - attach_workspace: at: . - run: ln -s .build/linux-amd64/mysqld_exporter mysqld_exporter - run: make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME - run: make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME - run: docker images - run: docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD - run: docker login -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io - run: docker push $DOCKER_IMAGE_NAME - run: docker push $QUAY_IMAGE_NAME docker_hub_release_tags: docker: - image: circleci/golang:1.10 working_directory: /go/src/github.com/prometheus/mysqld_exporter environment: DOCKER_IMAGE_NAME: prom/mysqld-exporter QUAY_IMAGE_NAME: quay.io/prometheus/mysqld-exporter steps: - checkout - setup_remote_docker - run: mkdir -v -p ${HOME}/bin - run: curl -L 'https://github.com/aktau/github-release/releases/download/v0.7.2/linux-amd64-github-release.tar.bz2' | tar xvjf - --strip-components 3 -C ${HOME}/bin - run: echo 'export PATH=${HOME}/bin:${PATH}' >> ${BASH_ENV} - attach_workspace: at: . 
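# Note: the workspace attached above carries the .build binaries that the 'build' job persisted via persist_to_workspace, so the release steps below can reuse them instead of recompiling.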
- run: make promu - run: promu crossbuild tarballs - run: promu checksum .tarballs - run: promu release .tarballs - store_artifacts: path: .tarballs destination: releases - run: ln -s .build/linux-amd64/mysqld_exporter mysqld_exporter - run: make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG - run: make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG - run: docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD - run: docker login -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io - run: | if [[ "$CIRCLE_TAG" =~ ^v[0-9]+(\.[0-9]+){2}$ ]]; then docker tag "$DOCKER_IMAGE_NAME:$CIRCLE_TAG" "$DOCKER_IMAGE_NAME:latest" docker tag "$QUAY_IMAGE_NAME:$CIRCLE_TAG" "$QUAY_IMAGE_NAME:latest" fi - run: docker push $DOCKER_IMAGE_NAME - run: docker push $QUAY_IMAGE_NAME workflows: version: 2 mysqld_exporter: jobs: - test: filters: tags: only: /.*/ - build: filters: tags: only: /.*/ - codespell: filters: tags: only: /.*/ - docker_hub_master: requires: - test - build filters: branches: only: master - docker_hub_release_tags: requires: - test - build filters: tags: only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/ branches: ignore: /.*/ prometheus-mysqld-exporter-0.11.0+ds/.github/000077500000000000000000000000001336116511700211075ustar00rootroot00000000000000prometheus-mysqld-exporter-0.11.0+ds/.github/ISSUE_TEMPLATE.md000066400000000000000000000015041336116511700236140ustar00rootroot00000000000000 ### Host operating system: output of `uname -a` ### mysqld_exporter version: output of `mysqld_exporter --version` ### MySQL server version ### mysqld_exporter command line flags ### What did you do that produced an error? ### What did you expect to see? ### What did you see instead? prometheus-mysqld-exporter-0.11.0+ds/.gitignore000066400000000000000000000001231336116511700215330ustar00rootroot00000000000000/.build /mysqld_exporter /.release /.tarballs *.tar.gz *.test *-stamp .idea *.iml prometheus-mysqld-exporter-0.11.0+ds/.promu.yml000066400000000000000000000011711336116511700215120ustar00rootroot00000000000000repository: path: github.com/prometheus/mysqld_exporter build: flags: -a -tags netgo ldflags: | -X {{repoPath}}/vendor/github.com/prometheus/common/version.Version={{.Version}} -X {{repoPath}}/vendor/github.com/prometheus/common/version.Revision={{.Revision}} -X {{repoPath}}/vendor/github.com/prometheus/common/version.Branch={{.Branch}} -X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildUser={{user}}@{{host}} -X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} tarball: files: - LICENSE - NOTICE prometheus-mysqld-exporter-0.11.0+ds/.travis.yml000066400000000000000000000006571336116511700216660ustar00rootroot00000000000000dist: trusty sudo: required language: go go: - 1.9.x - 1.10.x env: - MYSQL_IMAGE=mysql/mysql-server:5.5 - MYSQL_IMAGE=mysql/mysql-server:5.6 - MYSQL_IMAGE=mysql/mysql-server:5.7 - MYSQL_IMAGE=mysql/mysql-server:8.0 services: - docker go_import_path: github.com/prometheus/mysqld_exporter before_script: - sudo service mysql stop - docker --version - docker-compose --version - docker-compose up -d script: - make - make test prometheus-mysqld-exporter-0.11.0+ds/CHANGELOG.md000066400000000000000000000154631336116511700213670ustar00rootroot00000000000000## v0.11.0 / 2018-06-29 ### BREAKING CHANGES: * Flags now use the Kingpin library, and require double-dashes. #222 This also changes the behavior of boolean flags.
* Enable: `--collector.global_status` * Disable: `--no-collector.global_status` ### Changes: * [CHANGE] Limit number and lifetime of connections #208 * [ENHANCEMENT] Move session params to DSN #259 * [ENHANCEMENT] Use native DB.Ping() instead of self-written implementation #210 * [FEATURE] Add collector duration metrics #197 * [FEATURE] Add 'collect[]' URL parameter to filter enabled collectors #235 * [FEATURE] Set a `lock_wait_timeout` on the MySQL connection #252 * [FEATURE] Set `last_scrape_error` when an error occurs #237 * [FEATURE] Collect metrics from `performance_schema.replication_group_member_stats` #271 * [FEATURE] Add innodb compression statistic #275 * [FEATURE] Add metrics for the output of `SHOW SLAVE HOSTS` #279 * [FEATURE] Support custom CA truststore and client SSL keypair. #255 * [BUGFIX] Fix perfEventsStatementsQuery #213 * [BUGFIX] Fix `file_instances` metric collector #205 * [BUGFIX] Fix prefix removal in `perf_schema_file_instances` #257 * [BUGFIX] Fix 32bit compile issue #273 * [BUGFIX] Ignore boolean keys in my.cnf. #283 ## v0.10.0 / 2017-04-25 BREAKING CHANGES: * `mysql_slave_...` metrics now include an additional `connection_name` label to support mariadb multi-source replication. (#178) * [FEATURE] Add read/write query response time #166 * [FEATURE] Add Galera gcache size metric #169 * [FEATURE] Add MariaDB multi source replication support #178 * [FEATURE] Implement heartbeat metrics #183 * [FEATURE] Add basic file_summary_by_instance metrics #189 * [BUGFIX] Workaround MySQL bug 79533 #173 ## 0.9.0 / 2016-09-26 BREAKING CHANGES: * InnoDB buffer pool page stats have been renamed/fixed to better support aggregations (#130) * [FEATURE] scrape slave status for multisource replication #134 * [FEATURE] Add client statistics support (+ add tests on users & clients statistics) #138 * [IMPROVEMENT] Consistency of error logging. #144 * [IMPROVEMENT] Add label aggregation for innodb buffer metrics #130 * [IMPROVEMENT] Improved and fixed user/client statistics #149 * [FEATURE] Added the last binlog file number metric. #152 * [MISC] Add an example recording rules file #156 * [FEATURE] Added PXC/Galera info metrics. #155 * [FEATURE] Added metrics from SHOW ENGINE INNODB STATUS. #160 * [IMPROVEMENT] Fix wsrep_cluster_status #146 ## 0.8.1 / 2016-05-05 * [BUGFIX] Fix collect.info_schema.innodb_tablespaces #119 * [BUGFIX] Fix SLAVE STATUS "Connecting" #125 * [MISC] New release process using docker, circleci and a centralized building tool #120 * [MISC] Typos #121 ## 0.8.0 / 2016-04-19 BREAKING CHANGES: * global status `innodb_buffer_pool_pages` have been renamed/labeled. * innodb metrics `buffer_page_io` have been renamed/labeled. * [MISC] Add Travis CI automatic testing. * [MISC] Refactor mysqld_exporter.go into collector package. 
* [FEATURE] Add `mysql_up` metric (PR #99) * [FEATURE] Collect time metrics for processlist (PR #87) * [CHANGE] Separate innodb_buffer_pool_pages status metrics (PR #101) * [FEATURE] Added metrics from SHOW ENGINE TOKUDB STATUS (PR #103) * [CHANGE] Add special handling of "buffer_page_io" subsystem (PR #115) * [FEATURE] Add collector for innodb_sys_tablespaces (PR #116) ## 0.7.1 / 2016-02-16 * [IMPROVEMENT] Soft error on collector failure (PR #84) * [BUGFIX] Fix innodb_metrics collector (PR #85) * [BUGFIX] Parse auto increment values and maximum as float64 (PR #88) ## 0.7.0 / 2016-02-12 BREAKING CHANGES: * Global status metrics for "handlers" have been renamed * [FEATURE] New collector for `information_schema.table_statistics` (PR #57) * [FEATURE] New server version metric (PR #59) * [FEATURE] New collector for `information_schema.innodb_metrics` (PR #69) * [FEATURE] Read credentials from ".my.cnf" files (PR #77) * [FEATURE] New collector for query response time distribution (PR #79) * [FEATURE] Add minimum time flag for processlist metrics (PR #82) * [IMPROVEMENT] Collect more metrics from `performance_schema.events_statements_summary_by_digest` (PR #58) * [IMPROVEMENT] Add option to filter metrics queries from the slow log (PR #60) * [IMPROVEMENT] Leverage lock-free SHOW SLAVE STATUS (PR #61) * [IMPROVEMENT] Add labels to global status "handlers" counters (PR #68) * [IMPROVEMENT] Update Makefile.COMMON from utils repo (PR #73) * [BUGFIX] Fix broken error return in the scrape function and log an error (PR #64) * [BUGFIX] Check log_bin before running SHOW BINARY LOGS (PR #74) * [BUGFIX] Fixed uint for scrapeInnodbMetrics() and gofmt (PR #81) ## 0.6.0 / 2015-10-28 BREAKING CHANGES: * The digest_text mapping metric has been removed, now included in all digest metrics (PR #50) * Flags for timing metrics have been removed, now included with related counter flag (PR #48) * [FEATURE] New collector for metrics from information_schema.processlist (PR #34) * [FEATURE] New collector for binlog counts/sizes (PR #35) * [FEATURE] New collector for performance_schema.{file_summary_by_event_name,events_waits_summary_global_by_event_name} (PR #49) * [FEATURE] New collector for information_schema.tables (PR #51) * [IMPROVEMENT] All collection methods now have enable flags (PR #46) * [IMPROVEMENT] Consolidate performance_schema metrics flags (PR #48) * [IMPROVEMENT] Removed need for digest_text mapping metric (PR #50) * [IMPROVEMENT] Update docs (PR #52) ## 0.5.0 / 2015-09-22 * [FEATURE] Add metrics for table locks * [BUGFIX] Use uint64 to prevent int64 overflow * [BUGFIX] Correct picsecond times to correct second values ## 0.4.0 / 2015-09-21 * [CHANGE] Limit events_statements to recently used * [FEATURE] Add digest_text mapping metric * [IMPROVEMENT] General refactoring ## 0.3.0 / 2015-08-31 BREAKING CHANGES: Most metrics have been prefixed with Prometheus subsystem names to avoid conflicts between different collection methods. * [BUGFIX] Separate slave_status and global_status into separate subsystems. * [IMPROVEMENT] Refactor metrics creation. * [IMPROVEMENT] Add support for performance_schema.table_io_waits_summary_by_table collection. * [IMPROVEMENT] Add support for performance_schema.table_io_waits_summary_by_index_usage collection. * [IMPROVEMENT] Add support for performance_schema.events_statements_summary_by_digest collection. * [IMPROVEMENT] Add support for Percona userstats output collection. * [IMPROVEMENT] Add support for auto_increment column metrics collection. 
* [IMPROVEMENT] Add support for `SHOW GLOBAL VARIABLES` metrics collection. ## 0.2.0 / 2015-06-24 BREAKING CHANGES: Logging-related flags have changed. Metric names have changed. * [IMPROVEMENT] Add Docker support. * [CHANGE] Switch logging to Prometheus' logging library. * [BUGFIX] Fix slave status parsing. * [BUGFIX] Fix truncated numbers. * [CHANGE] Reorganize metrics names and types. ## 0.1.0 / 2015-05-05 * Initial release prometheus-mysqld-exporter-0.11.0+ds/CONTRIBUTING.md000066400000000000000000000017561336116511700220110ustar00rootroot00000000000000# Contributing Prometheus uses GitHub to manage reviews of pull requests. * If you have a trivial fix or improvement, go ahead and create a pull request, addressing (with `@...`) the maintainer of this repository (see [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. * If you plan to do something more involved, first discuss your ideas on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). This will avoid unnecessary work and surely give you and us a good deal of inspiration. * Relevant coding style guidelines are the [Go Code Review Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) and the _Formatting and style_ section of Peter Bourgon's [Go: Best Practices for Production Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). ## Local setup The easiest way to make a local development setup is to use Docker Compose. ``` docker-compose up make make test ``` prometheus-mysqld-exporter-0.11.0+ds/Dockerfile000066400000000000000000000003361336116511700215430ustar00rootroot00000000000000FROM quay.io/prometheus/busybox:latest MAINTAINER The Prometheus Authors COPY mysqld_exporter /bin/mysqld_exporter EXPOSE 9104 ENTRYPOINT [ "/bin/mysqld_exporter" ] prometheus-mysqld-exporter-0.11.0+ds/LICENSE000066400000000000000000000260751336116511700205660ustar00rootroot00000000000000Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. prometheus-mysqld-exporter-0.11.0+ds/MAINTAINERS.md000066400000000000000000000000401336116511700216350ustar00rootroot00000000000000* Ben Kochie prometheus-mysqld-exporter-0.11.0+ds/Makefile000066400000000000000000000015541336116511700212140ustar00rootroot00000000000000# Copyright 2015 The Prometheus Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
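# 'all: vet' below adds vet as an extra prerequisite to the 'all' target declared in the included Makefile.common; GNU make merges prerequisite lists when a target appears in more than one rule.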
all: vet include Makefile.common STATICCHECK_IGNORE = \ github.com/prometheus/mysqld_exporter/mysqld_exporter.go:SA1019 DOCKER_IMAGE_NAME ?= mysqld-exporter test-docker: @echo ">> testing docker image" ./test_image.sh "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" 9104 .PHONY: test-docker prometheus-mysqld-exporter-0.11.0+ds/Makefile.common000066400000000000000000000061421336116511700225010ustar00rootroot00000000000000# Copyright 2018 The Prometheus Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A common Makefile that includes rules to be reused in different prometheus projects. # !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! # Example usage : # Create the main Makefile in the root project directory. # include Makefile.common # customTarget: # @echo ">> Running customTarget" # # Ensure GOBIN is not set during build so that promu is installed to the correct path unexport GOBIN GO ?= go GOFMT ?= $(GO)fmt FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) PROMU := $(FIRST_GOPATH)/bin/promu STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck GOVENDOR := $(FIRST_GOPATH)/bin/govendor pkgs = ./... PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) all: style staticcheck unused build test style: @echo ">> checking code style" ! $(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print) | grep '^' check_license: @echo ">> checking license header" @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ done); \ if [ -n "$${licRes}" ]; then \ echo "license header checking failed:"; echo "$${licRes}"; \ exit 1; \ fi test-short: @echo ">> running short tests" $(GO) test -short $(pkgs) test: @echo ">> running all tests" $(GO) test -race $(pkgs) format: @echo ">> formatting code" $(GO) fmt $(pkgs) vet: @echo ">> vetting code" $(GO) vet $(pkgs) staticcheck: $(STATICCHECK) @echo ">> running staticcheck" $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) unused: $(GOVENDOR) @echo ">> running check for unused packages" @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages' build: promu @echo ">> building binaries" $(PROMU) build --prefix $(PREFIX) tarball: promu @echo ">> building release tarball" $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) docker: docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . promu: GOOS= GOARCH= $(GO) get -u github.com/prometheus/promu $(FIRST_GOPATH)/bin/staticcheck: GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck $(FIRST_GOPATH)/bin/govendor: GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor .PHONY: all style check_license format build test vet assets tarball docker promu staticcheck $(FIRST_GOPATH)/bin/staticcheck govendor $(FIRST_GOPATH)/bin/govendorprometheus-mysqld-exporter-0.11.0+ds/NOTICE000066400000000000000000000001011336116511700204430ustar00rootroot00000000000000Exporter for MySQL daemon. 
Copyright 2015 The Prometheus Authors prometheus-mysqld-exporter-0.11.0+ds/README.md000066400000000000000000000242351336116511700210340ustar00rootroot00000000000000# MySQL Server Exporter [![Build Status](https://travis-ci.org/prometheus/mysqld_exporter.svg)][travis] [![CircleCI](https://circleci.com/gh/prometheus/mysqld_exporter/tree/master.svg?style=shield)][circleci] [![Docker Repository on Quay](https://quay.io/repository/prometheus/mysqld-exporter/status)][quay] [![Docker Pulls](https://img.shields.io/docker/pulls/prom/mysqld-exporter.svg?maxAge=604800)][hub] [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/mysqld_exporter)](https://goreportcard.com/report/github.com/prometheus/mysqld_exporter) Prometheus exporter for MySQL server metrics. Supported MySQL versions: 5.1 and up. NOTE: Not all collection methods are supported on MySQL < 5.6 ## Building and running ### Required Grants ```sql CREATE USER 'exporter'@'localhost' IDENTIFIED BY 'XXXXXXXX' WITH MAX_USER_CONNECTIONS 3; GRANT PROCESS, REPLICATION CLIENT, SELECT ON *.* TO 'exporter'@'localhost'; ``` NOTE: It is recommended to set a max connection limit for the user to avoid overloading the server with monitoring scrapes under heavy load. ### Build make ### Running Running using an environment variable: export DATA_SOURCE_NAME='user:password@(hostname:3306)/' ./mysqld_exporter Running using ~/.my.cnf: ./mysqld_exporter Example format for flags for version > 0.10.0: --collect.auto_increment.columns --no-collect.auto_increment.columns Example format for flags for version <= 0.10.0: -collect.auto_increment.columns -collect.auto_increment.columns=[true|false] ### Collector Flags Name | MySQL Version | Description -------------------------------------------------------|---------------|------------------------------------------------------------------------------------ collect.auto_increment.columns | 5.1 | Collect auto_increment columns and max values from information_schema. collect.binlog_size | 5.1 | Collect the current size of all registered binlog files collect.engine_innodb_status | 5.1 | Collect from SHOW ENGINE INNODB STATUS. collect.engine_tokudb_status | 5.6 | Collect from SHOW ENGINE TOKUDB STATUS. collect.global_status | 5.1 | Collect from SHOW GLOBAL STATUS (Enabled by default) collect.global_variables | 5.1 | Collect from SHOW GLOBAL VARIABLES (Enabled by default) collect.info_schema.clientstats | 5.5 | If running with userstat=1, set to true to collect client statistics. collect.info_schema.innodb_metrics | 5.6 | Collect metrics from information_schema.innodb_metrics. collect.info_schema.innodb_tablespaces | 5.7 | Collect metrics from information_schema.innodb_sys_tablespaces. collect.info_schema.innodb_cmp | 5.5 | Collect InnoDB compressed tables metrics from information_schema.innodb_cmp. collect.info_schema.innodb_cmpmem | 5.5 | Collect InnoDB buffer pool compression metrics from information_schema.innodb_cmpmem. collect.info_schema.processlist | 5.1 | Collect thread state counts from information_schema.processlist. collect.info_schema.processlist.min_time | 5.1 | Minimum time a thread must be in each state to be counted. (default: 0) collect.info_schema.query_response_time | 5.5 | Collect query response time distribution if query_response_time_stats is ON. collect.info_schema.tables | 5.1 | Collect metrics from information_schema.tables (Enabled by default) collect.info_schema.tables.databases | 5.1 | The list of databases to collect table stats for, or '`*`' for all. 
collect.info_schema.tablestats | 5.1 | If running with userstat=1, set to true to collect table statistics. collect.info_schema.userstats | 5.1 | If running with userstat=1, set to true to collect user statistics. collect.perf_schema.eventsstatements | 5.6 | Collect metrics from performance_schema.events_statements_summary_by_digest. collect.perf_schema.eventsstatements.digest_text_limit | 5.6 | Maximum length of the normalized statement text. (default: 120) collect.perf_schema.eventsstatements.limit | 5.6 | Limit the number of events statements digests by response time. (default: 250) collect.perf_schema.eventsstatements.timelimit | 5.6 | Limit how old the 'last_seen' events statements can be, in seconds. (default: 86400) collect.perf_schema.eventswaits | 5.5 | Collect metrics from performance_schema.events_waits_summary_global_by_event_name. collect.perf_schema.file_events | 5.6 | Collect metrics from performance_schema.file_summary_by_event_name. collect.perf_schema.file_instances | 5.5 | Collect metrics from performance_schema.file_summary_by_instance. collect.perf_schema.indexiowaits | 5.6 | Collect metrics from performance_schema.table_io_waits_summary_by_index_usage. collect.perf_schema.tableiowaits | 5.6 | Collect metrics from performance_schema.table_io_waits_summary_by_table. collect.perf_schema.tablelocks | 5.6 | Collect metrics from performance_schema.table_lock_waits_summary_by_table. collect.perf_schema.replication_group_member_stats | 5.7 | Collect metrics from performance_schema.replication_group_member_stats. collect.slave_status | 5.1 | Collect from SHOW SLAVE STATUS (Enabled by default) collect.slave_hosts | 5.1 | Collect from SHOW SLAVE HOSTS collect.heartbeat | 5.1 | Collect from [heartbeat](#heartbeat). collect.heartbeat.database | 5.1 | Database from where to collect heartbeat data. (default: heartbeat) collect.heartbeat.table | 5.1 | Table from where to collect heartbeat data. (default: heartbeat) ### General Flags Name | Description -------------------------------------------|-------------------------------------------------------------------------------------------------- config.my-cnf | Path to .my.cnf file to read MySQL credentials from. (default: `~/.my.cnf`) log.level | Logging verbosity (default: info) exporter.lock_wait_timeout | Set a lock_wait_timeout on the connection to avoid long metadata locking. (default: 2 seconds) exporter.log_slow_filter | Add a log_slow_filter to avoid slow query logging of scrapes. NOTE: Not supported by Oracle MySQL. web.listen-address | Address to listen on for web interface and telemetry. web.telemetry-path | Path under which to expose metrics. version | Print the version information. ### Setting the MySQL server's data source name The MySQL server's [data source name](http://en.wikipedia.org/wiki/Data_source_name) must be set via the `DATA_SOURCE_NAME` environment variable. The format of this variable is described at https://github.com/go-sql-driver/mysql#dsn-data-source-name. ## Customizing Configuration for an SSL Connection If the MySQL server supports SSL, you may need to specify a CA truststore to verify the server's chain-of-trust. You may also need to specify an SSL keypair for the client side of the SSL connection. To configure the mysqld exporter to use a custom CA certificate, add the following to the mysql cnf file: ``` ssl-ca=/path/to/ca/file ``` To specify the client SSL keypair, add the following to the cnf.
``` ssl-key=/path/to/ssl/client/key ssl-cert=/path/to/ssl/client/cert ``` Customizing the SSL configuration is only supported in the mysql cnf file and is not supported if you set the mysql server's data source name in the environment variable DATA_SOURCE_NAME. ## Using Docker You can deploy this exporter using the [prom/mysqld-exporter](https://registry.hub.docker.com/u/prom/mysqld-exporter/) Docker image. For example: ```bash docker network create my-mysql-network docker pull prom/mysqld-exporter docker run -d \ -p 9104:9104 \ --network my-mysql-network \ -e DATA_SOURCE_NAME="user:password@(my-mysql-network:3306)/" \ prom/mysqld-exporter ``` ## heartbeat With `collect.heartbeat` enabled, mysqld_exporter will scrape replication delay measured by heartbeat mechanisms. [Pt-heartbeat][pth] is the reference heartbeat implementation supported. [pth]:https://www.percona.com/doc/percona-toolkit/2.2/pt-heartbeat.html ## Prometheus Configuration The mysqld exporter will expose all metrics from enabled collectors by default, but it can be passed an optional list of collectors to filter metrics. The `collect[]` parameter accepts values matching [Collector Flags](#collector-flags) names (without `collect.` prefix). This can be useful for specifying different scrape intervals for different collectors. ```yaml scrape_configs: - job_name: 'mysql global status' scrape_interval: 15s static_configs: - targets: - '192.168.1.2:9104' params: collect[]: - global_status - job_name: 'mysql performance' scrape_interval: 1m static_configs: - targets: - '192.168.1.2:9104' params: collect[]: - perf_schema.tableiowaits - perf_schema.indexiowaits - perf_schema.tablelocks ``` ## Example Rules There are some sample rules available in [example.rules](example.rules) [circleci]: https://circleci.com/gh/prometheus/mysqld_exporter [hub]: https://hub.docker.com/r/prom/mysqld-exporter/ [travis]: https://travis-ci.org/prometheus/mysqld_exporter [quay]: https://quay.io/repository/prometheus/mysqld-exporter prometheus-mysqld-exporter-0.11.0+ds/VERSION000066400000000000000000000000071336116511700206140ustar00rootroot000000000000000.11.0 prometheus-mysqld-exporter-0.11.0+ds/collector/000077500000000000000000000000001336116511700215355ustar00rootroot00000000000000prometheus-mysqld-exporter-0.11.0+ds/collector/binlog.go000066400000000000000000000044241336116511700233420ustar00rootroot00000000000000// Scrape `SHOW BINARY LOGS` package collector import ( "database/sql" "strconv" "strings" "github.com/prometheus/client_golang/prometheus" ) const ( // Subsystem. binlog = "binlog" // Queries. logbinQuery = `SELECT @@log_bin` binlogQuery = `SHOW BINARY LOGS` ) // Metric descriptors. var ( binlogSizeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, binlog, "size_bytes"), "Combined size of all registered binlog files.", []string{}, nil, ) binlogFilesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, binlog, "files"), "Number of registered binlog files.", []string{}, nil, ) binlogFileNumberDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, binlog, "file_number"), "The last binlog file number.", []string{}, nil, ) ) // ScrapeBinlogSize collects from `SHOW BINARY LOGS`. type ScrapeBinlogSize struct{} // Name of the Scraper. Should be unique. func (ScrapeBinlogSize) Name() string { return "binlog_size" } // Help describes the role of the Scraper.
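// (Each scraper's Help text doubles as the description of the corresponding collect.<name> flag registered by the exporter's main package.)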
func (ScrapeBinlogSize) Help() string { return "Collect the current size of all registered binlog files" } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeBinlogSize) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { var logBin uint8 err := db.QueryRow(logbinQuery).Scan(&logBin) if err != nil { return err } // If log_bin is OFF, do not run SHOW BINARY LOGS which explicitly produces MySQL error if logBin == 0 { return nil } masterLogRows, err := db.Query(binlogQuery) if err != nil { return err } defer masterLogRows.Close() var ( size uint64 count uint64 filename string filesize uint64 ) size = 0 count = 0 for masterLogRows.Next() { if err := masterLogRows.Scan(&filename, &filesize); err != nil { return err } size += filesize count++ } ch <- prometheus.MustNewConstMetric( binlogSizeDesc, prometheus.GaugeValue, float64(size), ) ch <- prometheus.MustNewConstMetric( binlogFilesDesc, prometheus.GaugeValue, float64(count), ) // The last row contains the last binlog file number. value, _ := strconv.ParseFloat(strings.Split(filename, ".")[1], 64) ch <- prometheus.MustNewConstMetric( binlogFileNumberDesc, prometheus.GaugeValue, value, ) return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/binlog_test.go000066400000000000000000000027601336116511700244020ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeBinlogSize(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() mock.ExpectQuery(logbinQuery).WillReturnRows(sqlmock.NewRows([]string{""}).AddRow(1)) columns := []string{"Log_name", "File_size"} rows := sqlmock.NewRows(columns). AddRow("centos6-bin.000001", "1813"). AddRow("centos6-bin.000002", "120"). AddRow("centos6-bin.000444", "573009") mock.ExpectQuery(sanitizeQuery(binlogQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeBinlogSize{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{}, value: 574942, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 3, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 444, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } prometheus-mysqld-exporter-0.11.0+ds/collector/collector.go000066400000000000000000000030611336116511700240520ustar00rootroot00000000000000package collector import ( "bytes" "database/sql" "regexp" "strconv" "github.com/prometheus/client_golang/prometheus" ) const ( // Exporter namespace. namespace = "mysql" // Math constant for picoseconds to seconds. picoSeconds = 1e12 // Query to check whether user/table/client stats are enabled.
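// Percona Server and MariaDB expose the setting as 'userstat'; older Percona Server releases used 'userstat_running', so the query below checks both names.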
userstatCheckQuery = `SHOW VARIABLES WHERE Variable_Name='userstat' OR Variable_Name='userstat_running'` ) var logRE = regexp.MustCompile(`.+\.(\d+)$`) func newDesc(subsystem, name, help string) *prometheus.Desc { return prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, name), help, nil, nil, ) } func parseStatus(data sql.RawBytes) (float64, bool) { if bytes.Compare(data, []byte("Yes")) == 0 || bytes.Compare(data, []byte("ON")) == 0 { return 1, true } if bytes.Compare(data, []byte("No")) == 0 || bytes.Compare(data, []byte("OFF")) == 0 { return 0, true } // SHOW SLAVE STATUS Slave_IO_Running can return "Connecting" which is a non-running state. if bytes.Compare(data, []byte("Connecting")) == 0 { return 0, true } // SHOW GLOBAL STATUS like 'wsrep_cluster_status' can return "Primary" or "Non-Primary"/"Disconnected" if bytes.Compare(data, []byte("Primary")) == 0 { return 1, true } if bytes.Compare(data, []byte("Non-Primary")) == 0 || bytes.Compare(data, []byte("Disconnected")) == 0 { return 0, true } if logNum := logRE.Find(data); logNum != nil { value, err := strconv.ParseFloat(string(logNum), 64) return value, err == nil } value, err := strconv.ParseFloat(string(data), 64) return value, err == nil } prometheus-mysqld-exporter-0.11.0+ds/collector/collector_test.go000066400000000000000000000021351336116511700251120ustar00rootroot00000000000000package collector import ( "strings" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" ) type labelMap map[string]string type MetricResult struct { labels labelMap value float64 metricType dto.MetricType } func readMetric(m prometheus.Metric) MetricResult { pb := &dto.Metric{} m.Write(pb) labels := make(labelMap, len(pb.Label)) for _, v := range pb.Label { labels[v.GetName()] = v.GetValue() } if pb.Gauge != nil { return MetricResult{labels: labels, value: pb.GetGauge().GetValue(), metricType: dto.MetricType_GAUGE} } if pb.Counter != nil { return MetricResult{labels: labels, value: pb.GetCounter().GetValue(), metricType: dto.MetricType_COUNTER} } if pb.Untyped != nil { return MetricResult{labels: labels, value: pb.GetUntyped().GetValue(), metricType: dto.MetricType_UNTYPED} } panic("Unsupported metric type") } func sanitizeQuery(q string) string { q = strings.Join(strings.Fields(q), " ") q = strings.Replace(q, "(", "\\(", -1) q = strings.Replace(q, ")", "\\)", -1) q = strings.Replace(q, "*", "\\*", -1) return q } prometheus-mysqld-exporter-0.11.0+ds/collector/engine_innodb.go000066400000000000000000000043131336116511700246630ustar00rootroot00000000000000// Scrape `SHOW ENGINE INNODB STATUS`. package collector import ( "database/sql" "regexp" "strconv" "strings" "github.com/prometheus/client_golang/prometheus" ) const ( // Subsystem. innodb = "engine_innodb" // Query. engineInnodbStatusQuery = `SHOW ENGINE INNODB STATUS` ) // ScrapeEngineInnodbStatus scrapes from `SHOW ENGINE INNODB STATUS`. type ScrapeEngineInnodbStatus struct{} // Name of the Scraper. Should be unique. func (ScrapeEngineInnodbStatus) Name() string { return "engine_innodb_status" } // Help describes the role of the Scraper. func (ScrapeEngineInnodbStatus) Help() string { return "Collect from SHOW ENGINE INNODB STATUS" } // Scrape collects data from database connection and sends it over channel as prometheus metric. 
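// The InnoDB status report is a single free-form text blob, so the interesting counters are recovered line by line with regexps; for example, a line such as "661 queries inside InnoDB, 10 queries in queue" yields the queries_inside_innodb and queries_in_queue gauges.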
func (ScrapeEngineInnodbStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { rows, err := db.Query(engineInnodbStatusQuery) if err != nil { return err } defer rows.Close() var typeCol, nameCol, statusCol string // First row should contain the necessary info. If many rows returned then it's unknown case. if rows.Next() { if err := rows.Scan(&typeCol, &nameCol, &statusCol); err != nil { return err } } // 0 queries inside InnoDB, 0 queries in queue // 0 read views open inside InnoDB rQueries, _ := regexp.Compile(`(\d+) queries inside InnoDB, (\d+) queries in queue`) rViews, _ := regexp.Compile(`(\d+) read views open inside InnoDB`) for _, line := range strings.Split(statusCol, "\n") { if data := rQueries.FindStringSubmatch(line); data != nil { value, _ := strconv.ParseFloat(data[1], 64) ch <- prometheus.MustNewConstMetric( newDesc(innodb, "queries_inside_innodb", "Queries inside InnoDB."), prometheus.GaugeValue, value, ) value, _ = strconv.ParseFloat(data[2], 64) ch <- prometheus.MustNewConstMetric( newDesc(innodb, "queries_in_queue", "Queries in queue."), prometheus.GaugeValue, value, ) } else if data := rViews.FindStringSubmatch(line); data != nil { value, _ := strconv.ParseFloat(data[1], 64) ch <- prometheus.MustNewConstMetric( newDesc(innodb, "read_views_open_inside_innodb", "Read views open inside InnoDB."), prometheus.GaugeValue, value, ) } } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/engine_innodb_test.go000066400000000000000000000133021336116511700257200ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeEngineInnodbStatus(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() sample := ` ===================================== 2016-09-14 19:04:38 0x7fed21462700 INNODB MONITOR OUTPUT ===================================== Per second averages calculated from the last 30 seconds ----------------- BACKGROUND THREAD ----------------- srv_master_thread loops: 1 srv_active, 0 srv_shutdown, 49166 srv_idle srv_master_thread log flush and writes: 49165 ---------- SEMAPHORES ---------- OS WAIT ARRAY INFO: reservation count 15 OS WAIT ARRAY INFO: signal count 12 RW-shared spins 0, rounds 4, OS waits 2 RW-excl spins 0, rounds 0, OS waits 0 RW-sx spins 0, rounds 0, OS waits 0 Spin rounds per wait: 4.00 RW-shared, 0.00 RW-excl, 0.00 RW-sx ------------ TRANSACTIONS ------------ Trx id counter 67843 Purge done for trx's n:o < 55764 undo n:o < 0 state: running but idle History list length 779 LIST OF TRANSACTIONS FOR EACH SESSION: ---TRANSACTION 422131596298608, not started 0 lock struct(s), heap size 1136, 0 row lock(s) -------- FILE I/O -------- I/O thread 0 state: waiting for completed aio requests (insert buffer thread) I/O thread 1 state: waiting for completed aio requests (log thread) I/O thread 2 state: waiting for completed aio requests (read thread) I/O thread 3 state: waiting for completed aio requests (read thread) I/O thread 4 state: waiting for completed aio requests (read thread) I/O thread 5 state: waiting for completed aio requests (read thread) I/O thread 6 state: waiting for completed aio requests (write thread) I/O thread 7 state: waiting for completed aio requests (write thread) I/O thread 8 state: waiting for completed aio requests (write thread) I/O thread 9 state: 
waiting for completed aio requests (write thread) Pending normal aio reads: [0, 0, 0, 0] , aio writes: [0, 0, 0, 0] , ibuf aio reads:, log i/o's:, sync i/o's: Pending flushes (fsync) log: 0; buffer pool: 0 512 OS file reads, 57 OS file writes, 8 OS fsyncs 0.00 reads/s, 0 avg bytes/read, 0.00 writes/s, 0.00 fsyncs/s ------------------------------------- INSERT BUFFER AND ADAPTIVE HASH INDEX ------------------------------------- Ibuf: size 1, free list len 0, seg size 2, 0 merges merged operations: insert 0, delete mark 0, delete 0 discarded operations: insert 0, delete mark 0, delete 0 Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) 0.00 hash searches/s, 0.00 non-hash searches/s --- LOG --- Log sequence number 37771171 Log flushed up to 37771171 Pages flushed up to 37771171 Last checkpoint at 37771162 Max checkpoint age 80826164 Checkpoint age target 78300347 Modified age 0 Checkpoint age 9 0 pending log flushes, 0 pending chkp writes 10 log i/o's done, 0.00 log i/o's/second ---------------------- BUFFER POOL AND MEMORY ---------------------- Total large memory allocated 139722752 Dictionary memory allocated 367821 Internal hash tables (constant factor + variable factor) Adaptive hash index 2252736 (2219072 + 33664) Page hash 139112 (buffer pool 0 only) Dictionary cache 922589 (554768 + 367821) File system 839328 (812272 + 27056) Lock system 334008 (332872 + 1136) Recovery system 0 (0 + 0) Buffer pool size 8191 Buffer pool size, bytes 0 Free buffers 7684 Database pages 507 Old database pages 0 Modified db pages 0 Pending reads 0 Pending writes: LRU 0, flush list 0, single page 0 Pages made young 0, not young 0 0.00 youngs/s, 0.00 non-youngs/s Pages read 473, created 34, written 36 0.00 reads/s, 0.00 creates/s, 0.00 writes/s No buffer pool page gets since the last printout Pages read ahead 0.00/s, evicted without access 0.00/s, Random read ahead 0.00/s LRU len: 507, unzip_LRU len: 0 I/O sum[0]:cur[0], unzip sum[0]:cur[0] -------------- ROW OPERATIONS -------------- 661 queries inside InnoDB, 10 queries in queue 15 read views open inside InnoDB 0 RW transactions active inside InnoDB Process ID=1, Main thread ID=140656308950784, state: sleeping Number of rows inserted 0, updated 0, deleted 0, read 12 0.00 inserts/s, 0.00 updates/s, 0.00 deletes/s, 0.00 reads/s ---------------------------- END OF INNODB MONITOR OUTPUT ============================ ` columns := []string{"Type", "Name", "Status"} rows := sqlmock.NewRows(columns).AddRow("InnoDB", "", sample) mock.ExpectQuery(sanitizeQuery(engineInnodbStatusQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeEngineInnodbStatus{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() metricsExpected := []MetricResult{ {labels: labelMap{}, value: 661, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 10, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 15, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range metricsExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := 
mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } prometheus-mysqld-exporter-0.11.0+ds/collector/engine_tokudb.go000066400000000000000000000032371336116511700247060ustar00rootroot00000000000000// Scrape `SHOW ENGINE TOKUDB STATUS`. package collector import ( "database/sql" "strings" "github.com/prometheus/client_golang/prometheus" ) const ( // Subsystem. tokudb = "engine_tokudb" // Query. engineTokudbStatusQuery = `SHOW ENGINE TOKUDB STATUS` ) // ScrapeEngineTokudbStatus scrapes from `SHOW ENGINE TOKUDB STATUS`. type ScrapeEngineTokudbStatus struct{} // Name of the Scraper. Should be unique. func (ScrapeEngineTokudbStatus) Name() string { return "engine_tokudb_status" } // Help describes the role of the Scraper. func (ScrapeEngineTokudbStatus) Help() string { return "Collect from SHOW ENGINE TOKUDB STATUS" } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeEngineTokudbStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { tokudbRows, err := db.Query(engineTokudbStatusQuery) if err != nil { return err } defer tokudbRows.Close() var temp, key string var val sql.RawBytes for tokudbRows.Next() { if err := tokudbRows.Scan(&temp, &key, &val); err != nil { return err } key = strings.ToLower(key) if floatVal, ok := parseStatus(val); ok { ch <- prometheus.MustNewConstMetric( newDesc(tokudb, sanitizeTokudbMetric(key), "Generic metric from SHOW ENGINE TOKUDB STATUS."), prometheus.UntypedValue, floatVal, ) } } return nil } func sanitizeTokudbMetric(metricName string) string { replacements := map[string]string{ ">": "", ",": "", ":": "", "(": "", ")": "", " ": "_", "-": "_", "+": "and", "/": "and", } for r := range replacements { metricName = strings.Replace(metricName, r, replacements[r], -1) } return metricName } prometheus-mysqld-exporter-0.11.0+ds/collector/engine_tokudb_test.go000066400000000000000000000051251336116511700257430ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestSanitizeTokudbMetric(t *testing.T) { samples := map[string]string{ "loader: number of calls to loader->close() that failed": "loader_number_of_calls_to_loader_close_that_failed", "ft: promotion: stopped anyway, after locking the child": "ft_promotion_stopped_anyway_after_locking_the_child", "ft: basement nodes deserialized with fixed-keysize": "ft_basement_nodes_deserialized_with_fixed_keysize", "memory: number of bytes used (requested + overhead)": "memory_number_of_bytes_used_requested_and_overhead", "ft: uncompressed / compressed bytes written (overall)": "ft_uncompressed_and_compressed_bytes_written_overall", } convey.Convey("Replacement tests", t, func() { for metric := range samples { got := sanitizeTokudbMetric(metric) convey.So(got, convey.ShouldEqual, samples[metric]) } }) } func TestScrapeEngineTokudbStatus(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"Type", "Name", "Status"} rows := sqlmock.NewRows(columns). AddRow("TokuDB", "indexer: number of calls to indexer->build() succeeded", "1"). AddRow("TokuDB", "ft: promotion: stopped anyway, after locking the child", "45316247"). AddRow("TokuDB", "memory: mallocator version", "3.3.1-0-g9ef9d9e8c271cdf14f664b871a8f98c827714784").
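// The mallocator version above and the disk-full timestamp below cannot be parsed as floats, so parseStatus skips them; only the three numeric rows are expected to produce metrics.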
AddRow("TokuDB", "filesystem: most recent disk full", "Thu Jan 1 00:00:00 1970"). AddRow("TokuDB", "locktree: time spent ending the STO early (seconds)", "9115.904484") mock.ExpectQuery(sanitizeQuery(engineTokudbStatusQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeEngineTokudbStatus{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() metricsExpected := []MetricResult{ {labels: labelMap{}, value: 1, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{}, value: 45316247, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{}, value: 9115.904484, metricType: dto.MetricType_UNTYPED}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range metricsExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } prometheus-mysqld-exporter-0.11.0+ds/collector/exporter.go000066400000000000000000000115211336116511700237340ustar00rootroot00000000000000package collector import ( "database/sql" "fmt" "strings" "sync" "time" _ "github.com/go-sql-driver/mysql" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/log" "gopkg.in/alecthomas/kingpin.v2" ) // Metric name parts. const ( // Subsystem(s). exporter = "exporter" ) // SQL Queries. const ( // System variable params formatting. // See: https://github.com/go-sql-driver/mysql#system-variables sessionSettingsParam = `log_slow_filter=%27tmp_table_on_disk,filesort_on_disk%27` timeoutParam = `lock_wait_timeout=%d` ) // Tunable flags. var ( exporterLockTimeout = kingpin.Flag( "exporter.lock_wait_timeout", "Set a lock_wait_timeout on the connection to avoid long metadata locking.", ).Default("2").Int() slowLogFilter = kingpin.Flag( "exporter.log_slow_filter", "Add a log_slow_filter to avoid slow query logging of scrapes. NOTE: Not supported by Oracle MySQL.", ).Default("false").Bool() ) // Metric descriptors. var ( scrapeDurationDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, exporter, "collector_duration_seconds"), "Collector time duration.", []string{"collector"}, nil, ) ) // Exporter collects MySQL metrics. It implements prometheus.Collector. type Exporter struct { dsn string scrapers []Scraper metrics Metrics } // New returns a new MySQL exporter for the provided DSN. func New(dsn string, metrics Metrics, scrapers []Scraper) *Exporter { // Setup extra params for the DSN, default to having a lock timeout. dsnParams := []string{fmt.Sprintf(timeoutParam, *exporterLockTimeout)} if *slowLogFilter { dsnParams = append(dsnParams, sessionSettingsParam) } if strings.Contains(dsn, "?") { dsn = dsn + "&" } else { dsn = dsn + "?" } dsn += strings.Join(dsnParams, "&") return &Exporter{ dsn: dsn, scrapers: scrapers, metrics: metrics, } } // Describe implements prometheus.Collector. func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { ch <- e.metrics.TotalScrapes.Desc() ch <- e.metrics.Error.Desc() e.metrics.ScrapeErrors.Describe(ch) ch <- e.metrics.MySQLUp.Desc() } // Collect implements prometheus.Collector. 
func (e *Exporter) Collect(ch chan<- prometheus.Metric) { e.scrape(ch) ch <- e.metrics.TotalScrapes ch <- e.metrics.Error e.metrics.ScrapeErrors.Collect(ch) ch <- e.metrics.MySQLUp } func (e *Exporter) scrape(ch chan<- prometheus.Metric) { e.metrics.TotalScrapes.Inc() var err error scrapeTime := time.Now() db, err := sql.Open("mysql", e.dsn) if err != nil { log.Errorln("Error opening connection to database:", err) e.metrics.Error.Set(1) return } defer db.Close() // By design, the exporter should use at most one connection per request. db.SetMaxOpenConns(1) db.SetMaxIdleConns(1) // Set max lifetime for a connection. db.SetConnMaxLifetime(1 * time.Minute) if err := db.Ping(); err != nil { log.Errorln("Error pinging mysqld:", err) e.metrics.MySQLUp.Set(0) e.metrics.Error.Set(1) return } e.metrics.MySQLUp.Set(1) ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "connection") wg := &sync.WaitGroup{} defer wg.Wait() for _, scraper := range e.scrapers { wg.Add(1) go func(scraper Scraper) { defer wg.Done() label := "collect." + scraper.Name() scrapeTime := time.Now() if err := scraper.Scrape(db, ch); err != nil { log.Errorln("Error scraping for "+label+":", err) e.metrics.ScrapeErrors.WithLabelValues(label).Inc() e.metrics.Error.Set(1) } ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), label) }(scraper) } } // Metrics represents exporter metrics whose values can be carried between HTTP requests. type Metrics struct { TotalScrapes prometheus.Counter ScrapeErrors *prometheus.CounterVec Error prometheus.Gauge MySQLUp prometheus.Gauge } // NewMetrics creates a new Metrics instance. func NewMetrics() Metrics { subsystem := exporter return Metrics{ TotalScrapes: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "scrapes_total", Help: "Total number of times MySQL was scraped for metrics.", }), ScrapeErrors: prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "scrape_errors_total", Help: "Total number of times an error occurred scraping MySQL.", }, []string{"collector"}), Error: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "last_scrape_error", Help: "Whether the last scrape of metrics from MySQL resulted in an error (1 for error, 0 for success).", }), MySQLUp: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: "up", Help: "Whether the MySQL server is up.", }), } } prometheus-mysqld-exporter-0.11.0+ds/collector/exporter_test.go000066400000000000000000000015461336116511700250010ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/smartystreets/goconvey/convey" ) const dsn = "root@/mysql" func TestExporter(t *testing.T) { if testing.Short() { t.Skip("-short is passed, skipping test") } exporter := New( dsn, NewMetrics(), []Scraper{ ScrapeGlobalStatus{}, }) convey.Convey("Metrics describing", t, func() { ch := make(chan *prometheus.Desc) go func() { exporter.Describe(ch) close(ch) }() for range ch { } }) convey.Convey("Metrics collection", t, func() { ch := make(chan prometheus.Metric) go func() { exporter.Collect(ch) close(ch) }() for m := range ch { got := readMetric(m) if got.labels[model.MetricNameLabel] == "mysql_up" { convey.So(got.value, convey.ShouldEqual, 1) } } }) }
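// gatherOnce is an illustrative helper, not part of the upstream suite: it
// sketches how the Exporter is meant to be wired to a private registry,
// reusing the dsn constant above. A Gather call drives one full Collect pass.
func gatherOnce(t *testing.T) {
	exporter := New(dsn, NewMetrics(), []Scraper{ScrapeGlobalStatus{}})
	registry := prometheus.NewRegistry()
	registry.MustRegister(exporter)
	if _, err := registry.Gather(); err != nil {
		t.Errorf("error gathering metrics: %s", err)
	}
}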
prometheus-mysqld-exporter-0.11.0+ds/collector/global_status.go000066400000000000000000000115571336116511700247400ustar00rootroot00000000000000// Scrape `SHOW GLOBAL STATUS`. package collector import ( "database/sql" "regexp" "strings" "github.com/prometheus/client_golang/prometheus" ) const ( // Scrape query. globalStatusQuery = `SHOW GLOBAL STATUS` // Subsystem. globalStatus = "global_status" ) // Regexp to match various groups of status vars. var globalStatusRE = regexp.MustCompile(`^(com|handler|connection_errors|innodb_buffer_pool_pages|innodb_rows|performance_schema)_(.*)$`) // Metric descriptors. var ( globalCommandsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "commands_total"), "Total number of executed MySQL commands.", []string{"command"}, nil, ) globalHandlerDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "handlers_total"), "Total number of executed MySQL handlers.", []string{"handler"}, nil, ) globalConnectionErrorsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "connection_errors_total"), "Total number of MySQL connection errors.", []string{"error"}, nil, ) globalBufferPoolPagesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "buffer_pool_pages"), "Innodb buffer pool pages by state.", []string{"state"}, nil, ) globalBufferPoolPageChangesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "buffer_pool_page_changes_total"), "Innodb buffer pool page state changes.", []string{"operation"}, nil, ) globalInnoDBRowOpsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "innodb_row_ops_total"), "Total number of MySQL InnoDB row operations.", []string{"operation"}, nil, ) globalPerformanceSchemaLostDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "performance_schema_lost_total"), "Total number of MySQL instrumentations that could not be loaded or created due to memory constraints.", []string{"instrumentation"}, nil, ) ) // ScrapeGlobalStatus collects from `SHOW GLOBAL STATUS`. type ScrapeGlobalStatus struct{} // Name of the Scraper. Should be unique. func (ScrapeGlobalStatus) Name() string { return globalStatus } // Help describes the role of the Scraper. func (ScrapeGlobalStatus) Help() string { return "Collect from SHOW GLOBAL STATUS" } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeGlobalStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { globalStatusRows, err := db.Query(globalStatusQuery) if err != nil { return err } defer globalStatusRows.Close() var key string var val sql.RawBytes var textItems = map[string]string{ "wsrep_local_state_uuid": "", "wsrep_cluster_state_uuid": "", "wsrep_provider_version": "", } for globalStatusRows.Next() { if err := globalStatusRows.Scan(&key, &val); err != nil { return err } if floatVal, ok := parseStatus(val); ok { // Unparsable values are silently skipped. 
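// (Behaviour assumed from the test fixtures below: parseStatus also maps a
// few textual values to numbers, e.g. "OFF" parses as 0 and "Primary" as 1,
// while values such as an empty Ssl_version are not parsable and are either
// dropped or, for the wsrep_* keys, captured into textItems.)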
key = strings.ToLower(key) match := globalStatusRE.FindStringSubmatch(key) if match == nil { ch <- prometheus.MustNewConstMetric( newDesc(globalStatus, key, "Generic metric from SHOW GLOBAL STATUS."), prometheus.UntypedValue, floatVal, ) continue } switch match[1] { case "com": ch <- prometheus.MustNewConstMetric( globalCommandsDesc, prometheus.CounterValue, floatVal, match[2], ) case "handler": ch <- prometheus.MustNewConstMetric( globalHandlerDesc, prometheus.CounterValue, floatVal, match[2], ) case "connection_errors": ch <- prometheus.MustNewConstMetric( globalConnectionErrorsDesc, prometheus.CounterValue, floatVal, match[2], ) case "innodb_buffer_pool_pages": switch match[2] { case "data", "dirty", "free", "misc": ch <- prometheus.MustNewConstMetric( globalBufferPoolPagesDesc, prometheus.GaugeValue, floatVal, match[2], ) default: ch <- prometheus.MustNewConstMetric( globalBufferPoolPageChangesDesc, prometheus.CounterValue, floatVal, match[2], ) } case "innodb_rows": ch <- prometheus.MustNewConstMetric( globalInnoDBRowOpsDesc, prometheus.CounterValue, floatVal, match[2], ) case "performance_schema": ch <- prometheus.MustNewConstMetric( globalPerformanceSchemaLostDesc, prometheus.CounterValue, floatVal, match[2], ) } } else if _, ok := textItems[key]; ok { textItems[key] = string(val) } } // mysql_galera_variables_info metric. if textItems["wsrep_local_state_uuid"] != "" { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc(prometheus.BuildFQName(namespace, "galera", "status_info"), "PXC/Galera status information.", []string{"wsrep_local_state_uuid", "wsrep_cluster_state_uuid", "wsrep_provider_version"}, nil), prometheus.GaugeValue, 1, textItems["wsrep_local_state_uuid"], textItems["wsrep_cluster_state_uuid"], textItems["wsrep_provider_version"], ) } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/global_status_test.go000066400000000000000000000060071336116511700257710ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeGlobalStatus(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"Variable_name", "Value"} rows := sqlmock.NewRows(columns). AddRow("Com_alter_db", "1"). AddRow("Com_show_status", "2"). AddRow("Com_select", "3"). AddRow("Connection_errors_internal", "4"). AddRow("Handler_commit", "5"). AddRow("Innodb_buffer_pool_pages_data", "6"). AddRow("Innodb_buffer_pool_pages_flushed", "7"). AddRow("Innodb_rows_read", "8"). AddRow("Performance_schema_users_lost", "9"). AddRow("Slave_running", "OFF"). AddRow("Ssl_version", ""). AddRow("Uptime", "10"). AddRow("wsrep_cluster_status", "Primary"). AddRow("wsrep_local_state_uuid", "6c06e583-686f-11e6-b9e3-8336ad58138c"). AddRow("wsrep_cluster_state_uuid", "6c06e583-686f-11e6-b9e3-8336ad58138c"). 
AddRow("wsrep_provider_version", "3.16(r5c765eb)") mock.ExpectQuery(sanitizeQuery(globalStatusQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeGlobalStatus{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{"command": "alter_db"}, value: 1, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"command": "show_status"}, value: 2, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"command": "select"}, value: 3, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"error": "internal"}, value: 4, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"handler": "commit"}, value: 5, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"state": "data"}, value: 6, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"operation": "flushed"}, value: 7, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"operation": "read"}, value: 8, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"instrumentation": "users_lost"}, value: 9, metricType: dto.MetricType_COUNTER}, {labels: labelMap{}, value: 0, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{}, value: 10, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{}, value: 1, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{"wsrep_local_state_uuid": "6c06e583-686f-11e6-b9e3-8336ad58138c", "wsrep_cluster_state_uuid": "6c06e583-686f-11e6-b9e3-8336ad58138c", "wsrep_provider_version": "3.16(r5c765eb)"}, value: 1, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } prometheus-mysqld-exporter-0.11.0+ds/collector/global_variables.go000066400000000000000000000061001336116511700253510ustar00rootroot00000000000000// Scrape `SHOW GLOBAL VARIABLES`. package collector import ( "database/sql" "regexp" "strconv" "strings" "github.com/prometheus/client_golang/prometheus" ) const ( // Metric subsystem globalVariables = "global_variables" // Metric SQL Queries. globalVariablesQuery = `SHOW GLOBAL VARIABLES` ) // ScrapeGlobalVariables collects from `SHOW GLOBAL VARIABLES`. type ScrapeGlobalVariables struct{} // Name of the Scraper. Should be unique. func (ScrapeGlobalVariables) Name() string { return globalVariables } // Help describes the role of the Scraper. func (ScrapeGlobalVariables) Help() string { return "Collect from SHOW GLOBAL VARIABLES" } // Scrape collects data from database connection and sends it over channel as prometheus metric. 
func (ScrapeGlobalVariables) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { globalVariablesRows, err := db.Query(globalVariablesQuery) if err != nil { return err } defer globalVariablesRows.Close() var key string var val sql.RawBytes var textItems = map[string]string{ "innodb_version": "", "version": "", "version_comment": "", "wsrep_cluster_name": "", "wsrep_provider_options": "", } for globalVariablesRows.Next() { if err := globalVariablesRows.Scan(&key, &val); err != nil { return err } key = strings.ToLower(key) if floatVal, ok := parseStatus(val); ok { ch <- prometheus.MustNewConstMetric( newDesc(globalVariables, key, "Generic gauge metric from SHOW GLOBAL VARIABLES."), prometheus.GaugeValue, floatVal, ) continue } else if _, ok := textItems[key]; ok { textItems[key] = string(val) } } // mysql_version_info metric. ch <- prometheus.MustNewConstMetric( prometheus.NewDesc(prometheus.BuildFQName(namespace, "version", "info"), "MySQL version and distribution.", []string{"innodb_version", "version", "version_comment"}, nil), prometheus.GaugeValue, 1, textItems["innodb_version"], textItems["version"], textItems["version_comment"], ) // mysql_galera_variables_info metric. if textItems["wsrep_cluster_name"] != "" { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc(prometheus.BuildFQName(namespace, "galera", "variables_info"), "PXC/Galera variables information.", []string{"wsrep_cluster_name"}, nil), prometheus.GaugeValue, 1, textItems["wsrep_cluster_name"], ) } // mysql_galera_gcache_size_bytes metric. if textItems["wsrep_provider_options"] != "" { ch <- prometheus.MustNewConstMetric( newDesc("galera", "gcache_size_bytes", "PXC/Galera gcache size."), prometheus.GaugeValue, parseWsrepProviderOptions(textItems["wsrep_provider_options"]), ) } return nil } // parseWsrepProviderOptions parse wsrep_provider_options to get gcache.size in bytes. func parseWsrepProviderOptions(opts string) float64 { var val float64 r, _ := regexp.Compile(`gcache.size = (\d+)([MG]?);`) data := r.FindStringSubmatch(opts) if data == nil { return 0 } val, _ = strconv.ParseFloat(data[1], 64) switch data[2] { case "M": val = val * 1024 * 1024 case "G": val = val * 1024 * 1024 * 1024 } return val } prometheus-mysqld-exporter-0.11.0+ds/collector/global_variables_test.go000066400000000000000000000203031336116511700264110ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeGlobalVariables(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"Variable_name", "Value"} rows := sqlmock.NewRows(columns). AddRow("wait_timeout", "28800"). AddRow("version_compile_os", "Linux"). AddRow("userstat", "OFF"). AddRow("transaction_prealloc_size", "4096"). AddRow("tx_isolation", "REPEATABLE-READ"). AddRow("tmp_table_size", "16777216"). AddRow("tmpdir", "/tmp"). AddRow("sync_binlog", "0"). AddRow("sync_frm", "ON"). AddRow("slow_launch_time", "2"). AddRow("innodb_version", "5.6.30-76.3"). AddRow("version", "5.6.30-76.3-56"). AddRow("version_comment", "Percona XtraDB Cluster..."). AddRow("wsrep_cluster_name", "supercluster"). 
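// The fixture row below carries a complete wsrep_provider_options string;
// the expectations that follow assume gcache.size = 128M is parsed out of
// it as 134217728 bytes.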
AddRow("wsrep_provider_options", "base_dir = /var/lib/mysql/; base_host = 10.91.142.82; base_port = 4567; cert.log_conflicts = no; debug = no; evs.auto_evict = 0; evs.causal_keepalive_period = PT1S; evs.debug_log_mask = 0x1; evs.delay_margin = PT1S; evs.delayed_keep_period = PT30S; evs.inactive_check_period = PT0.5S; evs.inactive_timeout = PT15S; evs.info_log_mask = 0; evs.install_timeout = PT7.5S; evs.join_retrans_period = PT1S; evs.keepalive_period = PT1S; evs.max_install_timeouts = 3; evs.send_window = 4; evs.stats_report_period = PT1M; evs.suspect_timeout = PT5S; evs.use_aggregate = true; evs.user_send_window = 2; evs.version = 0; evs.view_forget_timeout = P1D; gcache.dir = /var/lib/mysql/; gcache.keep_pages_count = 0; gcache.keep_pages_size = 0; gcache.mem_size = 0; gcache.name = /var/lib/mysql//galera.cache; gcache.page_size = 128M; gcache.size = 128M; gcomm.thread_prio = ; gcs.fc_debug = 0; gcs.fc_factor = 1.0; gcs.fc_limit = 16; gcs.fc_master_slave = no; gcs.max_packet_size = 64500; gcs.max_throttle = 0.25; gcs.recv_q_hard_limit = 9223372036854775807; gcs.recv_q_soft_limit = 0.25; gcs.sync_donor = no; gmcast.listen_addr = tcp://0.0.0.0:4567; gmcast.mcast_addr = ; gmcast.mcast_ttl = 1; gmcast.peer_timeout = PT3S; gmcast.segment = 0; gmcast.time_wait = PT5S; gmcast.version = 0; ist.recv_addr = 10.91.142.82; pc.announce_timeout = PT3S; pc.checksum = false; pc.ignore_quorum = false; pc.ignore_sb = false; pc.linger = PT20S; pc.npvo = false; pc.recovery = true; pc.version = 0; pc.wait_prim = true; pc.wait_prim_timeout = P30S; pc.weight = 1; protonet.backend = asio; protonet.version = 0; repl.causal_read_timeout = PT30S; repl.commit_order = 3; repl.key_format = FLAT8; repl.max_ws_size = 2147483647; repl.proto_max = 7; socket.checksum = 2; socket.recv_buf_size = 212992;") mock.ExpectQuery(globalVariablesQuery).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeGlobalVariables{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{}, value: 28800, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 4096, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 16777216, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 2, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"innodb_version": "5.6.30-76.3", "version": "5.6.30-76.3-56", "version_comment": "Percona XtraDB Cluster..."}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"wsrep_cluster_name": "supercluster"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 134217728, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } func TestParseWsrepProviderOptions(t *testing.T) { testE := "" testM := "base_dir = /var/lib/mysql/; base_host = 10.91.142.82; base_port = 4567; cert.log_conflicts = no; debug = no; evs.auto_evict = 0; evs.causal_keepalive_period = PT1S; evs.debug_log_mask = 0x1; evs.delay_margin = PT1S; evs.delayed_keep_period = PT30S; 
evs.inactive_check_period = PT0.5S; evs.inactive_timeout = PT15S; evs.info_log_mask = 0; evs.install_timeout = PT7.5S; evs.join_retrans_period = PT1S; evs.keepalive_period = PT1S; evs.max_install_timeouts = 3; evs.send_window = 4; evs.stats_report_period = PT1M; evs.suspect_timeout = PT5S; evs.use_aggregate = true; evs.user_send_window = 2; evs.version = 0; evs.view_forget_timeout = P1D; gcache.dir = /var/lib/mysql/; gcache.keep_pages_count = 0; gcache.keep_pages_size = 0; gcache.mem_size = 0; gcache.name = /var/lib/mysql//galera.cache; gcache.page_size = 128M; gcache.size = 128M; gcomm.thread_prio = ; gcs.fc_debug = 0; gcs.fc_factor = 1.0; gcs.fc_limit = 16; gcs.fc_master_slave = no; gcs.max_packet_size = 64500; gcs.max_throttle = 0.25; gcs.recv_q_hard_limit = 9223372036854775807; gcs.recv_q_soft_limit = 0.25; gcs.sync_donor = no; gmcast.listen_addr = tcp://0.0.0.0:4567; gmcast.mcast_addr = ; gmcast.mcast_ttl = 1; gmcast.peer_timeout = PT3S; gmcast.segment = 0; gmcast.time_wait = PT5S; gmcast.version = 0; ist.recv_addr = 10.91.142.82; pc.announce_timeout = PT3S; pc.checksum = false; pc.ignore_quorum = false; pc.ignore_sb = false; pc.linger = PT20S; pc.npvo = false; pc.recovery = true; pc.version = 0; pc.wait_prim = true; pc.wait_prim_timeout = P30S; pc.weight = 1; protonet.backend = asio; protonet.version = 0; repl.causal_read_timeout = PT30S; repl.commit_order = 3; repl.key_format = FLAT8; repl.max_ws_size = 2147483647; repl.proto_max = 7; socket.checksum = 2; socket.recv_buf_size = 212992;" testG := "base_dir = /var/lib/mysql/; base_host = 10.91.194.244; base_port = 4567; cert.log_conflicts = no; debug = no; evs.auto_evict = 0; evs.causal_keepalive_period = PT1S; evs.debug_log_mask = 0x1; evs.delay_margin = PT1S; evs.delayed_keep_period = PT30S; evs.inactive_check_period = PT0.5S; evs.inactive_timeout = PT15S; evs.info_log_mask = 0; evs.install_timeout = PT7.5S; evs.join_retrans_period = PT1S; evs.keepalive_period = PT1S; evs.max_install_timeouts = 3; evs.send_window = 4; evs.stats_report_period = PT1M; evs.suspect_timeout = PT5S; evs.use_aggregate = true; evs.user_send_window = 2; evs.version = 0; evs.view_forget_timeout = P1D; gcache.dir = /var/lib/mysql/; gcache.keep_pages_count = 0; gcache.keep_pages_size = 0; gcache.mem_size = 0; gcache.name = /var/lib/mysql//galera.cache; gcache.page_size = 128M; gcache.size = 2G; gcomm.thread_prio = ; gcs.fc_debug = 0; gcs.fc_factor = 1.0; gcs.fc_limit = 16; gcs.fc_master_slave = no; gcs.max_packet_size = 64500; gcs.max_throttle = 0.25; gcs.recv_q_hard_limit = 9223372036854775807; gcs.recv_q_soft_limit = 0.25; gcs.sync_donor = no; gmcast.listen_addr = tcp://0.0.0.0:4567; gmcast.mcast_addr = ; gmcast.mcast_ttl = 1; gmcast.peer_timeout = PT3S; gmcast.segment = 0; gmcast.time_wait = PT5S; gmcast.version = 0; ist.recv_addr = 10.91.194.244; pc.announce_timeout = PT3S; pc.checksum = false; pc.ignore_quorum = false; pc.ignore_sb = false; pc.linger = PT20S; pc.npvo = false; pc.recovery = true; pc.version = 0; pc.wait_prim = true; pc.wait_prim_timeout = P30S; pc.weight = 1; protonet.backend = asio; protonet.version = 0; repl.causal_read_timeout = PT30S; repl.commit_order = 3; repl.key_format = FLAT8; repl.max_ws_size = 2147483647; repl.proto_max = 7; socket.checksum = 2; socket.recv_buf_size = 212992;" testB := "gcache.page_size = 128M; gcache.size = 131072; gcomm.thread_prio = ;" convey.Convey("Parse wsrep_provider_options", t, func() { convey.So(parseWsrepProviderOptions(testE), convey.ShouldEqual, 0) convey.So(parseWsrepProviderOptions(testM), 
convey.ShouldEqual, 128*1024*1024) convey.So(parseWsrepProviderOptions(testG), convey.ShouldEqual, int64(2*1024*1024*1024)) convey.So(parseWsrepProviderOptions(testB), convey.ShouldEqual, 131072) }) } prometheus-mysqld-exporter-0.11.0+ds/collector/heartbeat.go000066400000000000000000000055501336116511700240300ustar00rootroot00000000000000// Scrape heartbeat data. package collector import ( "database/sql" "fmt" "strconv" "github.com/prometheus/client_golang/prometheus" "gopkg.in/alecthomas/kingpin.v2" ) const ( // heartbeat is the Metric subsystem we use. heartbeat = "heartbeat" // heartbeatQuery is the query used to fetch the stored and current // timestamps. %s will be replaced by the database and table name. // The second column gets the server timestamp at the exact same // time the query is run. heartbeatQuery = "SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(NOW(6)), server_id from `%s`.`%s`" ) var ( collectHeartbeatDatabase = kingpin.Flag( "collect.heartbeat.database", "Database from where to collect heartbeat data", ).Default("heartbeat").String() collectHeartbeatTable = kingpin.Flag( "collect.heartbeat.table", "Table from where to collect heartbeat data", ).Default("heartbeat").String() ) // Metric descriptors. var ( HeartbeatStoredDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, heartbeat, "stored_timestamp_seconds"), "Timestamp stored in the heartbeat table.", []string{"server_id"}, nil, ) HeartbeatNowDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, heartbeat, "now_timestamp_seconds"), "Timestamp of the current server.", []string{"server_id"}, nil, ) ) // ScrapeHeartbeat scrapes from the heartbeat table. // This is mainly targeting pt-heartbeat, but will work with any heartbeat // implementation that writes to a table with two columns: // CREATE TABLE heartbeat ( // ts varchar(26) NOT NULL, // server_id int unsigned NOT NULL PRIMARY KEY // ); type ScrapeHeartbeat struct{} // Name of the Scraper. Should be unique. func (ScrapeHeartbeat) Name() string { return "heartbeat" } // Help describes the role of the Scraper. func (ScrapeHeartbeat) Help() string { return "Collect from heartbeat" } // Scrape collects data from database connection and sends it over channel as prometheus metric.
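//
// With the default flags the statement issued is heartbeatQuery with the
// default database and table names substituted in (a sketch):
//
//	SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(NOW(6)), server_id from `heartbeat`.`heartbeat`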
func (ScrapeHeartbeat) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { query := fmt.Sprintf(heartbeatQuery, *collectHeartbeatDatabase, *collectHeartbeatTable) heartbeatRows, err := db.Query(query) if err != nil { return err } defer heartbeatRows.Close() var ( now, ts sql.RawBytes serverId int ) for heartbeatRows.Next() { if err := heartbeatRows.Scan(&ts, &now, &serverId); err != nil { return err } tsFloatVal, err := strconv.ParseFloat(string(ts), 64) if err != nil { return err } nowFloatVal, err := strconv.ParseFloat(string(now), 64) if err != nil { return err } serverId := strconv.Itoa(serverId) ch <- prometheus.MustNewConstMetric( HeartbeatNowDesc, prometheus.GaugeValue, nowFloatVal, serverId, ) ch <- prometheus.MustNewConstMetric( HeartbeatStoredDesc, prometheus.GaugeValue, tsFloatVal, serverId, ) } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/heartbeat_test.go000066400000000000000000000032441336116511700250650ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" "gopkg.in/alecthomas/kingpin.v2" ) func TestScrapeHeartbeat(t *testing.T) { _, err := kingpin.CommandLine.Parse([]string{ "--collect.heartbeat.database", "heartbeat-test", "--collect.heartbeat.table", "heartbeat-test", }) if err != nil { t.Fatal(err) } db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"UNIX_TIMESTAMP(ts)", "UNIX_TIMESTAMP(NOW(6))", "server_id"} rows := sqlmock.NewRows(columns). AddRow("1487597613.001320", "1487598113.448042", 1) mock.ExpectQuery(sanitizeQuery("SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(NOW(6)), server_id from `heartbeat-test`.`heartbeat-test`")).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeHeartbeat{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{"server_id": "1"}, value: 1487598113.448042, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"server_id": "1"}, value: 1487597613.00132, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema.go000066400000000000000000000001111336116511700243300ustar00rootroot00000000000000package collector // Subsystem. const informationSchema = "info_schema" prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_auto_increment.go000066400000000000000000000046571336116511700274470ustar00rootroot00000000000000// Scrape auto_increment column information. 
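// The query below derives each column's maximum value from its integer type
// as pow(2, bits + 1 if unsigned) - 1, e.g. 2^31-1 for a signed INT and
// 2^32-1 for an unsigned INT.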
package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) const infoSchemaAutoIncrementQuery = ` SELECT table_schema, table_name, column_name, auto_increment, pow(2, case data_type when 'tinyint' then 7 when 'smallint' then 15 when 'mediumint' then 23 when 'int' then 31 when 'bigint' then 63 end+(column_type like '% unsigned'))-1 as max_int FROM information_schema.tables t JOIN information_schema.columns c USING (table_schema,table_name) WHERE c.extra = 'auto_increment' AND t.auto_increment IS NOT NULL ` // Metric descriptors. var ( globalInfoSchemaAutoIncrementDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "auto_increment_column"), "The current value of an auto_increment column from information_schema.", []string{"schema", "table", "column"}, nil, ) globalInfoSchemaAutoIncrementMaxDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "auto_increment_column_max"), "The max value of an auto_increment column from information_schema.", []string{"schema", "table", "column"}, nil, ) ) // ScrapeAutoIncrementColumns collects auto_increment column information. type ScrapeAutoIncrementColumns struct{} // Name of the Scraper. Should be unique. func (ScrapeAutoIncrementColumns) Name() string { return "auto_increment.columns" } // Help describes the role of the Scraper. func (ScrapeAutoIncrementColumns) Help() string { return "Collect auto_increment columns and max values from information_schema" } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeAutoIncrementColumns) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { autoIncrementRows, err := db.Query(infoSchemaAutoIncrementQuery) if err != nil { return err } defer autoIncrementRows.Close() var ( schema, table, column string value, max float64 ) for autoIncrementRows.Next() { if err := autoIncrementRows.Scan( &schema, &table, &column, &value, &max, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( globalInfoSchemaAutoIncrementDesc, prometheus.GaugeValue, value, schema, table, column, ) ch <- prometheus.MustNewConstMetric( globalInfoSchemaAutoIncrementMaxDesc, prometheus.GaugeValue, max, schema, table, column, ) } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_clientstats.go000066400000000000000000000245361336116511700267660ustar00rootroot00000000000000// Scrape `information_schema.client_statistics`. package collector import ( "database/sql" "fmt" "strings" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/log" ) const clientStatQuery = `SELECT * FROM information_schema.client_statistics` var ( // Map known client-statistics values to types. Unknown types will be mapped as // untyped. 
informationSchemaClientStatisticsTypes = map[string]struct { vtype prometheus.ValueType desc *prometheus.Desc }{ "TOTAL_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_total_connections"), "The number of connections created for this client.", []string{"client"}, nil)}, "CONCURRENT_CONNECTIONS": {prometheus.GaugeValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_concurrent_connections"), "The number of concurrent connections for this client.", []string{"client"}, nil)}, "CONNECTED_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_connected_time_seconds_total"), "The cumulative number of seconds elapsed while there were connections from this client.", []string{"client"}, nil)}, "BUSY_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_busy_seconds_total"), "The cumulative number of seconds there was activity on connections from this client.", []string{"client"}, nil)}, "CPU_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_cpu_time_seconds_total"), "The cumulative CPU time elapsed, in seconds, while servicing this client's connections.", []string{"client"}, nil)}, "BYTES_RECEIVED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_bytes_received_total"), "The number of bytes received from this client’s connections.", []string{"client"}, nil)}, "BYTES_SENT": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_bytes_sent_total"), "The number of bytes sent to this client’s connections.", []string{"client"}, nil)}, "BINLOG_BYTES_WRITTEN": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_binlog_bytes_written_total"), "The number of bytes written to the binary log from this client’s connections.", []string{"client"}, nil)}, "ROWS_READ": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_read_total"), "The number of rows read by this client’s connections.", []string{"client"}, nil)}, "ROWS_SENT": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_sent_total"), "The number of rows sent by this client’s connections.", []string{"client"}, nil)}, "ROWS_DELETED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_deleted_total"), "The number of rows deleted by this client’s connections.", []string{"client"}, nil)}, "ROWS_INSERTED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_inserted_total"), "The number of rows inserted by this client’s connections.", []string{"client"}, nil)}, "ROWS_FETCHED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_fetched_total"), "The number of rows fetched by this client’s connections.", []string{"client"}, nil)}, "ROWS_UPDATED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_updated_total"), "The 
number of rows updated by this client’s connections.", []string{"client"}, nil)}, "TABLE_ROWS_READ": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_table_rows_read_total"), "The number of rows read from tables by this client’s connections. (It may be different from ROWS_FETCHED.)", []string{"client"}, nil)}, "SELECT_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_select_commands_total"), "The number of SELECT commands executed from this client’s connections.", []string{"client"}, nil)}, "UPDATE_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_update_commands_total"), "The number of UPDATE commands executed from this client’s connections.", []string{"client"}, nil)}, "OTHER_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_other_commands_total"), "The number of other commands executed from this client’s connections.", []string{"client"}, nil)}, "COMMIT_TRANSACTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_commit_transactions_total"), "The number of COMMIT commands issued by this client’s connections.", []string{"client"}, nil)}, "ROLLBACK_TRANSACTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rollback_transactions_total"), "The number of ROLLBACK commands issued by this client’s connections.", []string{"client"}, nil)}, "DENIED_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_denied_connections_total"), "The number of connections denied to this client.", []string{"client"}, nil)}, "LOST_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_lost_connections_total"), "The number of this client’s connections that were terminated uncleanly.", []string{"client"}, nil)}, "ACCESS_DENIED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_access_denied_total"), "The number of times this client’s connections issued commands that were denied.", []string{"client"}, nil)}, "EMPTY_QUERIES": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_empty_queries_total"), "The number of times this client’s connections sent empty queries to the server.", []string{"client"}, nil)}, "TOTAL_SSL_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_total_ssl_connections_total"), "The number of times this client’s connections connected using SSL to the server.", []string{"client"}, nil)}, "MAX_STATEMENT_TIME_EXCEEDED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_max_statement_time_exceeded_total"), "The number of times a statement was aborted, because it was executed longer than its MAX_STATEMENT_TIME threshold.", []string{"client"}, nil)}, } ) // ScrapeClientStat collects from `information_schema.client_statistics`. type ScrapeClientStat struct{} // Name of the Scraper. Should be unique. 
func (ScrapeClientStat) Name() string { return "info_schema.clientstats" } // Help describes the role of the Scraper. func (ScrapeClientStat) Help() string { return "If running with userstat=1, set to true to collect client statistics" } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeClientStat) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { var varName, varVal string err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal) if err != nil { log.Debugln("Detailed client stats are not available.") return nil } if varVal == "OFF" { log.Debugf("MySQL @@%s is OFF.", varName) return nil } informationSchemaClientStatisticsRows, err := db.Query(clientStatQuery) if err != nil { return err } defer informationSchemaClientStatisticsRows.Close() // The client column is assumed to be column[0], while all other data is assumed to be coerceable to float64. // Because of the client column, clientStatData[0] maps to columnNames[1] when reading off the metrics // (because clientStatScanArgs is mapped as [ &client, &clientData[0], &clientData[1] ... &clientdata[n] ]). // To map metrics to names, therefore, we always range over columnNames[1:]. columnNames, err := informationSchemaClientStatisticsRows.Columns() if err != nil { return err } var ( client string // Holds the client name, which should be in column 0. clientStatData = make([]float64, len(columnNames)-1) // 1 less because of the client column. clientStatScanArgs = make([]interface{}, len(columnNames)) ) clientStatScanArgs[0] = &client for i := range clientStatData { clientStatScanArgs[i+1] = &clientStatData[i] } for informationSchemaClientStatisticsRows.Next() { if err := informationSchemaClientStatisticsRows.Scan(clientStatScanArgs...); err != nil { return err } // Loop over column names, and match to scan data. Unknown columns // will be filled with an untyped metric number. We assume that, other // than client, we'll only get numbers. for idx, columnName := range columnNames[1:] { if metricType, ok := informationSchemaClientStatisticsTypes[columnName]; ok { ch <- prometheus.MustNewConstMetric(metricType.desc, metricType.vtype, float64(clientStatData[idx]), client) } else { // Unknown metric. Report as untyped. desc := prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, fmt.Sprintf("client_statistics_%s", strings.ToLower(columnName))), fmt.Sprintf("Unsupported metric from column %s", columnName), []string{"client"}, nil) ch <- prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, float64(clientStatData[idx]), client) } } } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_clientstats_test.go000066400000000000000000000074651336116511700300270ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeClientStat(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() mock.ExpectQuery(sanitizeQuery(userstatCheckQuery)).WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}).
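// This first expectation models the userstat gate in Scrape above: only when
// the variable exists and is not OFF does the scraper go on to query
// information_schema.client_statistics.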
AddRow("userstat", "ON")) columns := []string{"CLIENT", "TOTAL_CONNECTIONS", "CONCURRENT_CONNECTIONS", "CONNECTED_TIME", "BUSY_TIME", "CPU_TIME", "BYTES_RECEIVED", "BYTES_SENT", "BINLOG_BYTES_WRITTEN", "ROWS_READ", "ROWS_SENT", "ROWS_DELETED", "ROWS_INSERTED", "ROWS_UPDATED", "SELECT_COMMANDS", "UPDATE_COMMANDS", "OTHER_COMMANDS", "COMMIT_TRANSACTIONS", "ROLLBACK_TRANSACTIONS", "DENIED_CONNECTIONS", "LOST_CONNECTIONS", "ACCESS_DENIED", "EMPTY_QUERIES"} rows := sqlmock.NewRows(columns). AddRow("localhost", 1002, 0, 127027, 286, 245, float64(2565104853), 21090856, float64(2380108042), 767691, 1764, 8778, 1210741, 0, 1764, 1214416, 293, 2430888, 0, 0, 0, 0, 0) mock.ExpectQuery(sanitizeQuery(clientStatQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeClientStat{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"client": "localhost"}, value: 1002, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"client": "localhost"}, value: 127027, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 286, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 245, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: float64(2565104853), metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 21090856, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: float64(2380108042), metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 767691, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 1764, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 8778, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 1210741, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 1764, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 1214416, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 293, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 2430888, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_COUNTER}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_cmp.go000066400000000000000000000064741336116511700265420ustar00rootroot00000000000000// Scrape `information_schema.INNODB_CMP`. 
package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) const innodbCmpQuery = ` SELECT page_size, compress_ops, compress_ops_ok, compress_time, uncompress_ops, uncompress_time FROM information_schema.innodb_cmp ` // Metric descriptors. var ( infoSchemaInnodbCmpCompressOps = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_ops_total"), "Number of times a B-tree page of the size PAGE_SIZE has been compressed.", []string{"page_size"}, nil, ) infoSchemaInnodbCmpCompressOpsOk = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_ops_ok_total"), "Number of times a B-tree page of the size PAGE_SIZE has been successfully compressed.", []string{"page_size"}, nil, ) infoSchemaInnodbCmpCompressTime = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_time_seconds_total"), "Total time in seconds spent in attempts to compress B-tree pages.", []string{"page_size"}, nil, ) infoSchemaInnodbCmpUncompressOps = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_uncompress_ops_total"), "Number of times a B-tree page of the size PAGE_SIZE has been uncompressed.", []string{"page_size"}, nil, ) infoSchemaInnodbCmpUncompressTime = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_uncompress_time_seconds_total"), "Total time in seconds spent in uncompressing B-tree pages.", []string{"page_size"}, nil, ) ) // ScrapeInnodbCmp collects from `information_schema.innodb_cmp`. type ScrapeInnodbCmp struct{} // Name of the Scraper. Should be unique. func (ScrapeInnodbCmp) Name() string { return informationSchema + ".innodb_cmp" } // Help describes the role of the Scraper. func (ScrapeInnodbCmp) Help() string { return "Collect metrics from information_schema.innodb_cmp" } // Scrape collects data from database connection and sends it over channel as prometheus metric. 
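//
// Each row fans out into five counters labelled by page_size; with the
// exporter's "mysql" namespace, the fixture row in the test below becomes,
// for example:
//
//	mysql_info_schema_innodb_cmp_compress_ops_total{page_size="1024"} 10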
func (ScrapeInnodbCmp) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { informationSchemaInnodbCmpRows, err := db.Query(innodbCmpQuery) if err != nil { return err } defer informationSchemaInnodbCmpRows.Close() var ( page_size string compress_ops, compress_ops_ok, compress_time, uncompress_ops, uncompress_time float64 ) for informationSchemaInnodbCmpRows.Next() { if err := informationSchemaInnodbCmpRows.Scan( &page_size, &compress_ops, &compress_ops_ok, &compress_time, &uncompress_ops, &uncompress_time, ); err != nil { return err } ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressOps, prometheus.CounterValue, compress_ops, page_size) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressOpsOk, prometheus.CounterValue, compress_ops_ok, page_size) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressTime, prometheus.CounterValue, compress_time, page_size) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpUncompressOps, prometheus.CounterValue, uncompress_ops, page_size) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpUncompressTime, prometheus.CounterValue, uncompress_time, page_size) } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_cmp_test.go000066400000000000000000000031641336116511700275720ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeInnodbCmp(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"page_size", "compress_ops", "compress_ops_ok", "compress_time", "uncompress_ops", "uncompress_time"} rows := sqlmock.NewRows(columns). AddRow("1024", 10, 20, 30, 40, 50) mock.ExpectQuery(sanitizeQuery(innodbCmpQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeInnodbCmp{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"page_size": "1024"}, value: 10, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"page_size": "1024"}, value: 20, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"page_size": "1024"}, value: 30, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"page_size": "1024"}, value: 40, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"page_size": "1024"}, value: 50, metricType: dto.MetricType_COUNTER}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_cmpmem.go000066400000000000000000000061231336116511700272300ustar00rootroot00000000000000// Scrape `information_schema.INNODB_CMPMEM`. package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) const innodbCmpMemQuery = ` SELECT page_size, buffer_pool_instance, pages_used, pages_free, relocation_ops, relocation_time FROM information_schema.innodb_cmpmem ` // Metric descriptors. 
var ( infoSchemaInnodbCmpMemPagesRead = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_pages_used_total"), "Number of blocks of the size PAGE_SIZE that are currently in use.", []string{"page_size", "buffer_pool"}, nil, ) infoSchemaInnodbCmpMemPagesFree = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_pages_free_total"), "Number of blocks of the size PAGE_SIZE that are currently available for allocation.", []string{"page_size", "buffer_pool"}, nil, ) infoSchemaInnodbCmpMemRelocationOps = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_relocation_ops_total"), "Number of times a block of the size PAGE_SIZE has been relocated.", []string{"page_size", "buffer_pool"}, nil, ) infoSchemaInnodbCmpMemRelocationTime = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_relocation_time_seconds_total"), "Total time in seconds spent in relocating blocks.", []string{"page_size", "buffer_pool"}, nil, ) ) // ScrapeInnodbCmpMem collects from `information_schema.innodb_cmpmem`. type ScrapeInnodbCmpMem struct{} // Name of the Scraper. Should be unique. func (ScrapeInnodbCmpMem) Name() string { return informationSchema + ".innodb_cmpmem" } // Help describes the role of the Scraper. func (ScrapeInnodbCmpMem) Help() string { return "Collect metrics from information_schema.innodb_cmpmem" } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeInnodbCmpMem) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { informationSchemaInnodbCmpMemRows, err := db.Query(innodbCmpMemQuery) if err != nil { return err } defer informationSchemaInnodbCmpMemRows.Close() var ( page_size, buffer_pool string pages_used, pages_free, relocation_ops, relocation_time float64 ) for informationSchemaInnodbCmpMemRows.Next() { if err := informationSchemaInnodbCmpMemRows.Scan( &page_size, &buffer_pool, &pages_used, &pages_free, &relocation_ops, &relocation_time, ); err != nil { return err } ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemPagesRead, prometheus.CounterValue, pages_used, page_size, buffer_pool) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemPagesFree, prometheus.CounterValue, pages_free, page_size, buffer_pool) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemRelocationOps, prometheus.CounterValue, relocation_ops, page_size, buffer_pool) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemRelocationTime, prometheus.CounterValue, (relocation_time / 1000), page_size, buffer_pool) } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_cmpmem_test.go000066400000000000000000000031541336116511700302700ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeInnodbCmpMem(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"page_size", "buffer_pool", "pages_used", "pages_free", "relocation_ops", "relocation_time"} rows := sqlmock.NewRows(columns).
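// Scrape above treats the relocation_time column as milliseconds and divides
// it by 1000 before export, so the fixture value 6000 below is expected to
// surface as 6 (seconds).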
AddRow("1024", "0", 30, 40, 50, 6000) mock.ExpectQuery(sanitizeQuery(innodbCmpMemQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeInnodbCmpMem{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"page_size": "1024", "buffer_pool": "0"}, value: 30, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"page_size": "1024", "buffer_pool": "0"}, value: 40, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"page_size": "1024", "buffer_pool": "0"}, value: 50, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"page_size": "1024", "buffer_pool": "0"}, value: 6, metricType: dto.MetricType_COUNTER}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_metrics.go000066400000000000000000000110341336116511700274150ustar00rootroot00000000000000// Scrape `information_schema.innodb_metrics`. package collector import ( "database/sql" "regexp" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/log" ) const infoSchemaInnodbMetricsQuery = ` SELECT name, subsystem, type, comment, count FROM information_schema.innodb_metrics WHERE status = 'enabled' ` // Metrics descriptors. var ( infoSchemaBufferPageReadTotalDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_metrics_buffer_page_read_total"), "Total number of buffer pages read total.", []string{"type"}, nil, ) infoSchemaBufferPageWrittenTotalDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_metrics_buffer_page_written_total"), "Total number of buffer pages written total.", []string{"type"}, nil, ) infoSchemaBufferPoolPagesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_metrics_buffer_pool_pages"), "Total number of buffer pool pages by state.", []string{"state"}, nil, ) infoSchemaBufferPoolPagesDirtyDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_metrics_buffer_pool_dirty_pages"), "Total number of dirty pages in the buffer pool.", nil, nil, ) ) // Regexp for matching metric aggregations. var ( bufferRE = regexp.MustCompile(`^buffer_(pool_pages)_(.*)$`) bufferPageRE = regexp.MustCompile(`^buffer_page_(read|written)_(.*)$`) ) // ScrapeInnodbMetrics collects from `information_schema.innodb_metrics`. type ScrapeInnodbMetrics struct{} // Name of the Scraper. Should be unique. func (ScrapeInnodbMetrics) Name() string { return informationSchema + ".innodb_metrics" } // Help describes the role of the Scraper. func (ScrapeInnodbMetrics) Help() string { return "Collect metrics from information_schema.innodb_metrics" } // Scrape collects data from database connection and sends it over channel as prometheus metric. 
func (ScrapeInnodbMetrics) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { innodbMetricsRows, err := db.Query(infoSchemaInnodbMetricsQuery) if err != nil { return err } defer innodbMetricsRows.Close() var ( name, subsystem, metricType, comment string value float64 ) for innodbMetricsRows.Next() { if err := innodbMetricsRows.Scan( &name, &subsystem, &metricType, &comment, &value, ); err != nil { return err } // Special handling of the "buffer_page_io" subsystem. if subsystem == "buffer_page_io" { match := bufferPageRE.FindStringSubmatch(name) if len(match) != 3 { log.Warnln("innodb_metrics subsystem buffer_page_io returned an invalid name:", name) continue } switch match[1] { case "read": ch <- prometheus.MustNewConstMetric( infoSchemaBufferPageReadTotalDesc, prometheus.CounterValue, value, match[2], ) case "written": ch <- prometheus.MustNewConstMetric( infoSchemaBufferPageWrittenTotalDesc, prometheus.CounterValue, value, match[2], ) } continue } if subsystem == "buffer" { match := bufferRE.FindStringSubmatch(name) // Many buffer subsystem metrics are not matched, fall through to generic metric. if match != nil { switch match[1] { case "pool_pages": switch match[2] { case "total": // Ignore total, it is an aggregation of the rest. continue case "dirty": // Dirty pages are a separate metric, not in the total. ch <- prometheus.MustNewConstMetric( infoSchemaBufferPoolPagesDirtyDesc, prometheus.GaugeValue, value, ) default: ch <- prometheus.MustNewConstMetric( infoSchemaBufferPoolPagesDesc, prometheus.GaugeValue, value, match[2], ) } } continue } } metricName := "innodb_metrics_" + subsystem + "_" + name // MySQL returns counters named two different ways. "counter" and "status_counter" // value >= 0 is necessary due to upstream bugs: http://bugs.mysql.com/bug.php?id=75966 if (metricType == "counter" || metricType == "status_counter") && value >= 0 { description := prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, metricName+"_total"), comment, nil, nil, ) ch <- prometheus.MustNewConstMetric( description, prometheus.CounterValue, value, ) } else { description := prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, metricName), comment, nil, nil, ) ch <- prometheus.MustNewConstMetric( description, prometheus.GaugeValue, value, ) } } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_metrics_test.go000066400000000000000000000054051336116511700304610ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/log" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" "gopkg.in/alecthomas/kingpin.v2" ) func TestScrapeInnodbMetrics(t *testing.T) { // Suppress a log messages log.AddFlags(kingpin.CommandLine) _, err := kingpin.CommandLine.Parse([]string{"--log.level", "fatal"}) if err != nil { t.Fatal(err) } db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"name", "subsystem", "type", "comment", "count"} rows := sqlmock.NewRows(columns). AddRow("lock_timeouts", "lock", "counter", "Number of lock timeouts", 0). AddRow("buffer_pool_reads", "buffer", "status_counter", "Number of reads directly from disk (innodb_buffer_pool_reads)", 1). AddRow("buffer_pool_size", "server", "value", "Server buffer pool size (all buffer pools) in bytes", 2). 
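// The rows below exercise the special cases in Scrape above: buffer_page_io
// rows become typed read/written counters, buffer_pool_pages_total is
// dropped as an aggregate, and the malformed "NOPE" row should only log a
// warning; hence the log level forced to fatal at the top of this test.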
AddRow("buffer_page_read_system_page", "buffer_page_io", "counter", "Number of System Pages read", 3). AddRow("buffer_page_written_undo_log", "buffer_page_io", "counter", "Number of Undo Log Pages written", 4). AddRow("buffer_pool_pages_dirty", "buffer", "gauge", "Number of dirt buffer pool pages", 5). AddRow("buffer_pool_pages_data", "buffer", "gauge", "Number of data buffer pool pages", 6). AddRow("buffer_pool_pages_total", "buffer", "gauge", "Number of total buffer pool pages", 7). AddRow("NOPE", "buffer_page_io", "counter", "An invalid buffer_page_io metric", 999) mock.ExpectQuery(sanitizeQuery(infoSchemaInnodbMetricsQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeInnodbMetrics{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() metricExpected := []MetricResult{ {labels: labelMap{}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{}, value: 1, metricType: dto.MetricType_COUNTER}, {labels: labelMap{}, value: 2, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"type": "system_page"}, value: 3, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"type": "undo_log"}, value: 4, metricType: dto.MetricType_COUNTER}, {labels: labelMap{}, value: 5, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"state": "data"}, value: 6, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range metricExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_sys_tablespaces.go000066400000000000000000000056661336116511700311510ustar00rootroot00000000000000// Scrape `information_schema.innodb_sys_tablespaces`. package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) const innodbTablespacesQuery = ` SELECT SPACE, NAME, ifnull(FILE_FORMAT, 'NONE') as FILE_FORMAT, ifnull(ROW_FORMAT, 'NONE') as ROW_FORMAT, ifnull(SPACE_TYPE, 'NONE') as SPACE_TYPE, FILE_SIZE, ALLOCATED_SIZE FROM information_schema.innodb_sys_tablespaces ` // Metric descriptors. var ( infoSchemaInnodbTablesspaceInfoDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_tablespace_space_info"), "The Tablespace information and Space ID.", []string{"tablespace_name", "file_format", "row_format", "space_type"}, nil, ) infoSchemaInnodbTablesspaceFileSizeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_tablespace_file_size_bytes"), "The apparent size of the file, which represents the maximum size of the file, uncompressed.", []string{"tablespace_name"}, nil, ) infoSchemaInnodbTablesspaceAllocatedSizeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_tablespace_allocated_size_bytes"), "The actual size of the file, which is the amount of space allocated on disk.", []string{"tablespace_name"}, nil, ) ) // ScrapeInfoSchemaInnodbTablespaces collects from `information_schema.innodb_sys_tablespaces`. type ScrapeInfoSchemaInnodbTablespaces struct{} // Name of the Scraper. Should be unique. func (ScrapeInfoSchemaInnodbTablespaces) Name() string { return informationSchema + ".innodb_tablespaces" } // Help describes the role of the Scraper. 
func (ScrapeInfoSchemaInnodbTablespaces) Help() string { return "Collect metrics from information_schema.innodb_sys_tablespaces" } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeInfoSchemaInnodbTablespaces) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { tablespacesRows, err := db.Query(innodbTablespacesQuery) if err != nil { return err } defer tablespacesRows.Close() var ( tableSpace uint32 tableName string fileFormat string rowFormat string spaceType string fileSize uint64 allocatedSize uint64 ) for tablespacesRows.Next() { err = tablespacesRows.Scan( &tableSpace, &tableName, &fileFormat, &rowFormat, &spaceType, &fileSize, &allocatedSize, ) if err != nil { return err } ch <- prometheus.MustNewConstMetric( infoSchemaInnodbTablesspaceInfoDesc, prometheus.GaugeValue, float64(tableSpace), tableName, fileFormat, rowFormat, spaceType, ) ch <- prometheus.MustNewConstMetric( infoSchemaInnodbTablesspaceFileSizeDesc, prometheus.GaugeValue, float64(fileSize), tableName, ) ch <- prometheus.MustNewConstMetric( infoSchemaInnodbTablesspaceAllocatedSizeDesc, prometheus.GaugeValue, float64(allocatedSize), tableName, ) } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_innodb_sys_tablespaces_test.go000066400000000000000000000041211336116511700321710ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeInfoSchemaInnodbTablespaces(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"SPACE", "NAME", "FILE_FORMAT", "ROW_FORMAT", "SPACE_TYPE", "FILE_SIZE", "ALLOCATED_SIZE"} rows := sqlmock.NewRows(columns). AddRow(1, "sys/sys_config", "Barracuda", "Dynamic", "Single", 100, 100). 
AddRow(2, "db/compressed", "Barracuda", "Compressed", "Single", 300, 200) mock.ExpectQuery(sanitizeQuery(innodbTablespacesQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeInfoSchemaInnodbTablespaces{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"tablespace_name": "sys/sys_config", "file_format": "Barracuda", "row_format": "Dynamic", "space_type": "Single"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"tablespace_name": "sys/sys_config"}, value: 100, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"tablespace_name": "sys/sys_config"}, value: 100, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"tablespace_name": "db/compressed", "file_format": "Barracuda", "row_format": "Compressed", "space_type": "Single"}, value: 2, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"tablespace_name": "db/compressed"}, value: 300, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"tablespace_name": "db/compressed"}, value: 200, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_processlist.go000066400000000000000000000161251336116511700267760ustar00rootroot00000000000000// Scrape `information_schema.processlist`. package collector import ( "database/sql" "fmt" "strings" "github.com/prometheus/client_golang/prometheus" "gopkg.in/alecthomas/kingpin.v2" ) const infoSchemaProcesslistQuery = ` SELECT COALESCE(command,''),COALESCE(state,''),count(*),sum(time) FROM information_schema.processlist WHERE ID != connection_id() AND TIME >= %d GROUP BY command,state ORDER BY null ` // Tunable flags. var ( processlistMinTime = kingpin.Flag( "collect.info_schema.processlist.min_time", "Minimum time a thread must be in each state to be counted", ).Default("0").Int() ) // Metric descriptors. 
var ( processlistCountDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "threads"), "The number of threads (connections) split by current state.", []string{"state"}, nil) processlistTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "threads_seconds"), "The number of seconds threads (connections) have used split by current state.", []string{"state"}, nil) ) // whitelist for connection/process states in SHOW PROCESSLIST // tokudb uses the state column for "Queried about _______ rows" var ( // TODO: might need some more keys for other MySQL versions or other storage engines // see https://dev.mysql.com/doc/refman/5.7/en/general-thread-states.html threadStateCounterMap = map[string]uint32{ "after create": uint32(0), "altering table": uint32(0), "analyzing": uint32(0), "checking permissions": uint32(0), "checking table": uint32(0), "cleaning up": uint32(0), "closing tables": uint32(0), "converting heap to myisam": uint32(0), "copying to tmp table": uint32(0), "creating sort index": uint32(0), "creating table": uint32(0), "creating tmp table": uint32(0), "deleting": uint32(0), "executing": uint32(0), "execution of init_command": uint32(0), "end": uint32(0), "freeing items": uint32(0), "flushing tables": uint32(0), "fulltext initialization": uint32(0), "idle": uint32(0), "init": uint32(0), "killed": uint32(0), "waiting for lock": uint32(0), "logging slow query": uint32(0), "login": uint32(0), "manage keys": uint32(0), "opening tables": uint32(0), "optimizing": uint32(0), "preparing": uint32(0), "reading from net": uint32(0), "removing duplicates": uint32(0), "removing tmp table": uint32(0), "reopen tables": uint32(0), "repair by sorting": uint32(0), "repair done": uint32(0), "repair with keycache": uint32(0), "replication master": uint32(0), "rolling back": uint32(0), "searching rows for update": uint32(0), "sending data": uint32(0), "sorting for group": uint32(0), "sorting for order": uint32(0), "sorting index": uint32(0), "sorting result": uint32(0), "statistics": uint32(0), "updating": uint32(0), "waiting for tables": uint32(0), "waiting for table flush": uint32(0), "waiting on cond": uint32(0), "writing to net": uint32(0), "other": uint32(0), } threadStateMapping = map[string]string{ "user sleep": "idle", "creating index": "altering table", "committing alter table to storage engine": "altering table", "discard or import tablespace": "altering table", "rename": "altering table", "setup": "altering table", "renaming result table": "altering table", "preparing for alter table": "altering table", "copying to group table": "copying to tmp table", "copy to tmp table": "copying to tmp table", "query end": "end", "update": "updating", "updating main table": "updating", "updating reference tables": "updating", "system lock": "waiting for lock", "user lock": "waiting for lock", "table lock": "waiting for lock", "deleting from main table": "deleting", "deleting from reference tables": "deleting", } ) // ScrapeProcesslist collects from `information_schema.processlist`. type ScrapeProcesslist struct{} // Name of the Scraper. Should be unique. func (ScrapeProcesslist) Name() string { return informationSchema + ".processlist" } // Help describes the role of the Scraper. func (ScrapeProcesslist) Help() string { return "Collect current thread state counts from the information_schema.processlist" } // Scrape collects data from database connection and sends it over channel as prometheus metric. 
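// Raw (command, state) pairs are normalized through deriveThreadState below before
// being counted: for example ("Query", "System lock") maps to "waiting for lock",
// ("Sleep", "") to "idle", ("Binlog Dump", "") to "replication master", and any
// unrecognized combination is counted under "other".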
func (ScrapeProcesslist) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { processQuery := fmt.Sprintf( infoSchemaProcesslistQuery, *processlistMinTime, ) processlistRows, err := db.Query(processQuery) if err != nil { return err } defer processlistRows.Close() var ( command string state string count uint32 time uint32 ) stateCounts := make(map[string]uint32, len(threadStateCounterMap)) stateTime := make(map[string]uint32, len(threadStateCounterMap)) for k, v := range threadStateCounterMap { stateCounts[k] = v stateTime[k] = v } for processlistRows.Next() { err = processlistRows.Scan(&command, &state, &count, &time) if err != nil { return err } realState := deriveThreadState(command, state) stateCounts[realState] += count stateTime[realState] += time } for state, count := range stateCounts { ch <- prometheus.MustNewConstMetric(processlistCountDesc, prometheus.GaugeValue, float64(count), state) } for state, time := range stateTime { ch <- prometheus.MustNewConstMetric(processlistTimeDesc, prometheus.GaugeValue, float64(time), state) } return nil } func deriveThreadState(command string, state string) string { var normCmd = strings.Replace(strings.ToLower(command), "_", " ", -1) var normState = strings.Replace(strings.ToLower(state), "_", " ", -1) // check if it's already a valid state _, knownState := threadStateCounterMap[normState] if knownState { return normState } // check if plain mapping applies mappedState, canMap := threadStateMapping[normState] if canMap { return mappedState } // check special waiting for XYZ lock if strings.Contains(normState, "waiting for") && strings.Contains(normState, "lock") { return "waiting for lock" } if normCmd == "sleep" && normState == "" { return "idle" } if normCmd == "query" { return "executing" } if normCmd == "binlog dump" { return "replication master" } return "other" } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_query_response_time.go000066400000000000000000000072311336116511700305230ustar00rootroot00000000000000// Scrape `information_schema.query_response_time*` tables. package collector import ( "database/sql" "strconv" "strings" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/log" ) const queryResponseCheckQuery = `SELECT @@query_response_time_stats` var ( // Use uppercase for table names, otherwise read/write split will return the same results as total // due to the bug. 
queryResponseTimeQueries = [3]string{ "SELECT TIME, COUNT, TOTAL FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME", "SELECT TIME, COUNT, TOTAL FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME_READ", "SELECT TIME, COUNT, TOTAL FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME_WRITE", } infoSchemaQueryResponseTimeCountDescs = [3]*prometheus.Desc{ prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "query_response_time_seconds"), "The number of all queries by duration they took to execute.", []string{}, nil, ), prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "read_query_response_time_seconds"), "The number of read queries by duration they took to execute.", []string{}, nil, ), prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "write_query_response_time_seconds"), "The number of write queries by duration they took to execute.", []string{}, nil, ), } ) func processQueryResponseTimeTable(db *sql.DB, ch chan<- prometheus.Metric, query string, i int) error { queryDistributionRows, err := db.Query(query) if err != nil { return err } defer queryDistributionRows.Close() var ( length string count uint64 total string histogramCnt uint64 histogramSum float64 countBuckets = map[float64]uint64{} ) for queryDistributionRows.Next() { err = queryDistributionRows.Scan( &length, &count, &total, ) if err != nil { return err } length, _ := strconv.ParseFloat(strings.TrimSpace(length), 64) total, _ := strconv.ParseFloat(strings.TrimSpace(total), 64) histogramCnt += count histogramSum += total // Special case for "TOO LONG" row where we take into account the count field which is the only available // and do not add it as a part of histogram or metric if length == 0 { continue } countBuckets[length] = histogramCnt } // Create histogram with query counts ch <- prometheus.MustNewConstHistogram( infoSchemaQueryResponseTimeCountDescs[i], histogramCnt, histogramSum, countBuckets, ) return nil } // ScrapeQueryResponseTime collects from `information_schema.query_response_time`. type ScrapeQueryResponseTime struct{} // Name of the Scraper. Should be unique. func (ScrapeQueryResponseTime) Name() string { return "info_schema.query_response_time" } // Help describes the role of the Scraper. func (ScrapeQueryResponseTime) Help() string { return "Collect query response time distribution if query_response_time_stats is ON." } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeQueryResponseTime) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { var queryStats uint8 err := db.QueryRow(queryResponseCheckQuery).Scan(&queryStats) if err != nil { log.Debugln("Query response time distribution is not present.") return nil } if queryStats == 0 { log.Debugln("query_response_time_stats is OFF.") return nil } for i, query := range queryResponseTimeQueries { err := processQueryResponseTimeTable(db, ch, query, i) // The first query should not fail if query_response_time_stats is ON, // unlike the other two when the read/write tables exist only with Percona Server 5.6/5.7. 
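// Errors from the read/write split queries are therefore swallowed below instead
// of aborting the whole scrape.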
if i == 0 && err != nil { return err } } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_query_response_time_test.go000066400000000000000000000042221336116511700315570ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeQueryResponseTime(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() mock.ExpectQuery(queryResponseCheckQuery).WillReturnRows(sqlmock.NewRows([]string{""}).AddRow(1)) rows := sqlmock.NewRows([]string{"TIME", "COUNT", "TOTAL"}). AddRow(0.000001, 124, 0.000000). AddRow(0.000010, 179, 0.000797). AddRow(0.000100, 2859, 0.107321). AddRow(0.001000, 1085, 0.335395). AddRow(0.010000, 269, 0.522264). AddRow(0.100000, 11, 0.344209). AddRow(1.000000, 1, 0.267369). AddRow(10.000000, 0, 0.000000). AddRow(100.000000, 0, 0.000000). AddRow(1000.000000, 0, 0.000000). AddRow(10000.000000, 0, 0.000000). AddRow(100000.000000, 0, 0.000000). AddRow(1000000.000000, 0, 0.000000). AddRow("TOO LONG", 0, "TOO LONG") mock.ExpectQuery(sanitizeQuery(queryResponseTimeQueries[0])).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeQueryResponseTime{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() // Test histogram expectCounts := map[float64]uint64{ 1e-06: 124, 1e-05: 303, 0.0001: 3162, 0.001: 4247, 0.01: 4516, 0.1: 4527, 1: 4528, 10: 4528, 100: 4528, 1000: 4528, 10000: 4528, 100000: 4528, 1e+06: 4528, } expectHistogram := prometheus.MustNewConstHistogram(infoSchemaQueryResponseTimeCountDescs[0], 4528, 1.5773549999999998, expectCounts) expectPb := &dto.Metric{} expectHistogram.Write(expectPb) gotPb := &dto.Metric{} gotHistogram := <-ch // read the last item from channel gotHistogram.Write(gotPb) convey.Convey("Histogram comparison", t, func() { convey.So(expectPb.Histogram, convey.ShouldResemble, gotPb.Histogram) }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_tables.go000066400000000000000000000104521336116511700256730ustar00rootroot00000000000000// Scrape `information_schema.tables`. package collector import ( "database/sql" "fmt" "strings" "github.com/prometheus/client_golang/prometheus" "gopkg.in/alecthomas/kingpin.v2" ) const ( tableSchemaQuery = ` SELECT TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, ifnull(ENGINE, 'NONE') as ENGINE, ifnull(VERSION, '0') as VERSION, ifnull(ROW_FORMAT, 'NONE') as ROW_FORMAT, ifnull(TABLE_ROWS, '0') as TABLE_ROWS, ifnull(DATA_LENGTH, '0') as DATA_LENGTH, ifnull(INDEX_LENGTH, '0') as INDEX_LENGTH, ifnull(DATA_FREE, '0') as DATA_FREE, ifnull(CREATE_OPTIONS, 'NONE') as CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_SCHEMA = '%s' ` dbListQuery = ` SELECT SCHEMA_NAME FROM information_schema.schemata WHERE SCHEMA_NAME NOT IN ('mysql', 'performance_schema', 'information_schema') ` ) // Tunable flags. var ( tableSchemaDatabases = kingpin.Flag( "collect.info_schema.tables.databases", "The list of databases to collect table stats for, or '*' for all", ).Default("*").String() ) // Metric descriptors. 
var ( infoSchemaTablesVersionDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_version"), "The version number of the table's .frm file", []string{"schema", "table", "type", "engine", "row_format", "create_options"}, nil, ) infoSchemaTablesRowsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_rows"), "The estimated number of rows in the table from information_schema.tables", []string{"schema", "table"}, nil, ) infoSchemaTablesSizeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_size"), "The size of the table components from information_schema.tables", []string{"schema", "table", "component"}, nil, ) ) // ScrapeTableSchema collects from `information_schema.tables`. type ScrapeTableSchema struct{} // Name of the Scraper. Should be unique. func (ScrapeTableSchema) Name() string { return informationSchema + ".tables" } // Help describes the role of the Scraper. func (ScrapeTableSchema) Help() string { return "Collect metrics from information_schema.tables" } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeTableSchema) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { var dbList []string if *tableSchemaDatabases == "*" { dbListRows, err := db.Query(dbListQuery) if err != nil { return err } defer dbListRows.Close() var database string for dbListRows.Next() { if err := dbListRows.Scan( &database, ); err != nil { return err } dbList = append(dbList, database) } } else { dbList = strings.Split(*tableSchemaDatabases, ",") } for _, database := range dbList { tableSchemaRows, err := db.Query(fmt.Sprintf(tableSchemaQuery, database)) if err != nil { return err } defer tableSchemaRows.Close() var ( tableSchema string tableName string tableType string engine string version uint64 rowFormat string tableRows uint64 dataLength uint64 indexLength uint64 dataFree uint64 createOptions string ) for tableSchemaRows.Next() { err = tableSchemaRows.Scan( &tableSchema, &tableName, &tableType, &engine, &version, &rowFormat, &tableRows, &dataLength, &indexLength, &dataFree, &createOptions, ) if err != nil { return err } ch <- prometheus.MustNewConstMetric( infoSchemaTablesVersionDesc, prometheus.GaugeValue, float64(version), tableSchema, tableName, tableType, engine, rowFormat, createOptions, ) ch <- prometheus.MustNewConstMetric( infoSchemaTablesRowsDesc, prometheus.GaugeValue, float64(tableRows), tableSchema, tableName, ) ch <- prometheus.MustNewConstMetric( infoSchemaTablesSizeDesc, prometheus.GaugeValue, float64(dataLength), tableSchema, tableName, "data_length", ) ch <- prometheus.MustNewConstMetric( infoSchemaTablesSizeDesc, prometheus.GaugeValue, float64(indexLength), tableSchema, tableName, "index_length", ) ch <- prometheus.MustNewConstMetric( infoSchemaTablesSizeDesc, prometheus.GaugeValue, float64(dataFree), tableSchema, tableName, "data_free", ) } } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_tablestats.go000066400000000000000000000057301336116511700265720ustar00rootroot00000000000000// Scrape `information_schema.table_statistics`. package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/log" ) const tableStatQuery = ` SELECT TABLE_SCHEMA, TABLE_NAME, ROWS_READ, ROWS_CHANGED, ROWS_CHANGED_X_INDEXES FROM information_schema.table_statistics ` // Metric descriptors. 
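// The three counters below map one-to-one onto the ROWS_READ, ROWS_CHANGED and
// ROWS_CHANGED_X_INDEXES columns of tableStatQuery, each labeled with the owning
// schema and table.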
var ( infoSchemaTableStatsRowsReadDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_statistics_rows_read_total"), "The number of rows read from the table.", []string{"schema", "table"}, nil, ) infoSchemaTableStatsRowsChangedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_statistics_rows_changed_total"), "The number of rows changed in the table.", []string{"schema", "table"}, nil, ) infoSchemaTableStatsRowsChangedXIndexesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_statistics_rows_changed_x_indexes_total"), "The number of rows changed in the table, multiplied by the number of indexes changed.", []string{"schema", "table"}, nil, ) ) // ScrapeTableStat collects from `information_schema.table_statistics`. type ScrapeTableStat struct{} // Name of the Scraper. Should be unique. func (ScrapeTableStat) Name() string { return "info_schema.tablestats" } // Help describes the role of the Scraper. func (ScrapeTableStat) Help() string { return "If running with userstat=1, set to true to collect table statistics" } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeTableStat) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { var varName, varVal string err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal) if err != nil { log.Debugln("Detailed table stats are not available.") return nil } if varVal == "OFF" { log.Debugf("MySQL @@%s is OFF.", varName) return nil } informationSchemaTableStatisticsRows, err := db.Query(tableStatQuery) if err != nil { return err } defer informationSchemaTableStatisticsRows.Close() var ( tableSchema string tableName string rowsRead uint64 rowsChanged uint64 rowsChangedXIndexes uint64 ) for informationSchemaTableStatisticsRows.Next() { err = informationSchemaTableStatisticsRows.Scan( &tableSchema, &tableName, &rowsRead, &rowsChanged, &rowsChangedXIndexes, ) if err != nil { return err } ch <- prometheus.MustNewConstMetric( infoSchemaTableStatsRowsReadDesc, prometheus.CounterValue, float64(rowsRead), tableSchema, tableName, ) ch <- prometheus.MustNewConstMetric( infoSchemaTableStatsRowsChangedDesc, prometheus.CounterValue, float64(rowsChanged), tableSchema, tableName, ) ch <- prometheus.MustNewConstMetric( infoSchemaTableStatsRowsChangedXIndexesDesc, prometheus.CounterValue, float64(rowsChangedXIndexes), tableSchema, tableName, ) } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_tablestats_test.go000066400000000000000000000037301336116511700276270ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeTableStat(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() mock.ExpectQuery(sanitizeQuery(userstatCheckQuery)).WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}). AddRow("userstat", "ON")) columns := []string{"TABLE_SCHEMA", "TABLE_NAME", "ROWS_READ", "ROWS_CHANGED", "ROWS_CHANGED_X_INDEXES"} rows := sqlmock.NewRows(columns). AddRow("mysql", "db", 238, 0, 8). AddRow("mysql", "proxies_priv", 99, 1, 0). 
AddRow("mysql", "user", 1064, 2, 5) mock.ExpectQuery(sanitizeQuery(tableStatQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeTableStat{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"schema": "mysql", "table": "db"}, value: 238}, {labels: labelMap{"schema": "mysql", "table": "db"}, value: 0}, {labels: labelMap{"schema": "mysql", "table": "db"}, value: 8}, {labels: labelMap{"schema": "mysql", "table": "proxies_priv"}, value: 99}, {labels: labelMap{"schema": "mysql", "table": "proxies_priv"}, value: 1}, {labels: labelMap{"schema": "mysql", "table": "proxies_priv"}, value: 0}, {labels: labelMap{"schema": "mysql", "table": "user"}, value: 1064}, {labels: labelMap{"schema": "mysql", "table": "user"}, value: 2}, {labels: labelMap{"schema": "mysql", "table": "user"}, value: 5}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_userstats.go000066400000000000000000000234321336116511700264600ustar00rootroot00000000000000// Scrape `information_schema.user_statistics`. package collector import ( "database/sql" "fmt" "strings" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/log" ) const userStatQuery = `SELECT * FROM information_schema.user_statistics` var ( // Map known user-statistics values to types. Unknown types will be mapped as // untyped. informationSchemaUserStatisticsTypes = map[string]struct { vtype prometheus.ValueType desc *prometheus.Desc }{ "TOTAL_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_total_connections"), "The number of connections created for this user.", []string{"user"}, nil)}, "CONCURRENT_CONNECTIONS": {prometheus.GaugeValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_concurrent_connections"), "The number of concurrent connections for this user.", []string{"user"}, nil)}, "CONNECTED_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_connected_time_seconds_total"), "The cumulative number of seconds elapsed while there were connections from this user.", []string{"user"}, nil)}, "BUSY_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_busy_seconds_total"), "The cumulative number of seconds there was activity on connections from this user.", []string{"user"}, nil)}, "CPU_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_cpu_time_seconds_total"), "The cumulative CPU time elapsed, in seconds, while servicing this user's connections.", []string{"user"}, nil)}, "BYTES_RECEIVED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_bytes_received_total"), "The number of bytes received from this user’s connections.", []string{"user"}, nil)}, "BYTES_SENT": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_bytes_sent_total"), "The number of 
bytes sent to this user’s connections.", []string{"user"}, nil)}, "BINLOG_BYTES_WRITTEN": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_binlog_bytes_written_total"), "The number of bytes written to the binary log from this user’s connections.", []string{"user"}, nil)}, "ROWS_READ": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_read_total"), "The number of rows read by this user's connections.", []string{"user"}, nil)}, "ROWS_SENT": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_sent_total"), "The number of rows sent by this user's connections.", []string{"user"}, nil)}, "ROWS_DELETED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_deleted_total"), "The number of rows deleted by this user's connections.", []string{"user"}, nil)}, "ROWS_INSERTED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_inserted_total"), "The number of rows inserted by this user's connections.", []string{"user"}, nil)}, "ROWS_FETCHED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_fetched_total"), "The number of rows fetched by this user’s connections.", []string{"user"}, nil)}, "ROWS_UPDATED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_updated_total"), "The number of rows updated by this user’s connections.", []string{"user"}, nil)}, "TABLE_ROWS_READ": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_table_rows_read_total"), "The number of rows read from tables by this user’s connections. 
(It may be different from ROWS_FETCHED.)", []string{"user"}, nil)}, "SELECT_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_select_commands_total"), "The number of SELECT commands executed from this user’s connections.", []string{"user"}, nil)}, "UPDATE_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_update_commands_total"), "The number of UPDATE commands executed from this user’s connections.", []string{"user"}, nil)}, "OTHER_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_other_commands_total"), "The number of other commands executed from this user’s connections.", []string{"user"}, nil)}, "COMMIT_TRANSACTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_commit_transactions_total"), "The number of COMMIT commands issued by this user’s connections.", []string{"user"}, nil)}, "ROLLBACK_TRANSACTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rollback_transactions_total"), "The number of ROLLBACK commands issued by this user’s connections.", []string{"user"}, nil)}, "DENIED_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_denied_connections_total"), "The number of connections denied to this user.", []string{"user"}, nil)}, "LOST_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_lost_connections_total"), "The number of this user’s connections that were terminated uncleanly.", []string{"user"}, nil)}, "ACCESS_DENIED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_access_denied_total"), "The number of times this user’s connections issued commands that were denied.", []string{"user"}, nil)}, "EMPTY_QUERIES": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_empty_queries_total"), "The number of times this user’s connections sent empty queries to the server.", []string{"user"}, nil)}, "TOTAL_SSL_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_total_ssl_connections_total"), "The number of times this user’s connections connected using SSL to the server.", []string{"user"}, nil)}, } ) // ScrapeUserStat collects from `information_schema.user_statistics`. type ScrapeUserStat struct{} // Name of the Scraper. Should be unique. func (ScrapeUserStat) Name() string { return "info_schema.userstats" } // Help describes the role of the Scraper. func (ScrapeUserStat) Help() string { return "If running with userstat=1, set to true to collect user statistics" } // Scrape collects data from database connection and sends it over channel as prometheus metric. 
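// Columns present in information_schema.user_statistics but missing from
// informationSchemaUserStatisticsTypes are still exported, as untyped metrics
// named after the lower-cased column; e.g. a hypothetical column FOO_BAR would
// surface as mysql_info_schema_user_statistics_foo_bar.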
func (ScrapeUserStat) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { var varName, varVal string err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal) if err != nil { log.Debugln("Detailed user stats are not available.") return nil } if varVal == "OFF" { log.Debugf("MySQL @@%s is OFF.", varName) return nil } informationSchemaUserStatisticsRows, err := db.Query(userStatQuery) if err != nil { return err } defer informationSchemaUserStatisticsRows.Close() // The user column is assumed to be column[0], while all other data is assumed to be coerceable to float64. // Because of the user column, userStatData[0] maps to columnNames[1] when reading off the metrics // (because userStatScanArgs is mapped as [ &user, &userData[0], &userData[1] ... &userdata[n] ] // To map metrics to names therefore we always range over columnNames[1:] var columnNames []string columnNames, err = informationSchemaUserStatisticsRows.Columns() if err != nil { return err } var user string // Holds the username, which should be in column 0. var userStatData = make([]float64, len(columnNames)-1) // 1 less because of the user column. var userStatScanArgs = make([]interface{}, len(columnNames)) userStatScanArgs[0] = &user for i := range userStatData { userStatScanArgs[i+1] = &userStatData[i] } for informationSchemaUserStatisticsRows.Next() { err = informationSchemaUserStatisticsRows.Scan(userStatScanArgs...) if err != nil { return err } // Loop over column names, and match to scan data. Unknown columns // will be filled with an untyped metric number. We assume other then // user, that we'll only get numbers. for idx, columnName := range columnNames[1:] { if metricType, ok := informationSchemaUserStatisticsTypes[columnName]; ok { ch <- prometheus.MustNewConstMetric(metricType.desc, metricType.vtype, float64(userStatData[idx]), user) } else { // Unknown metric. Report as untyped. desc := prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, fmt.Sprintf("user_statistics_%s", strings.ToLower(columnName))), fmt.Sprintf("Unsupported metric from column %s", columnName), []string{"user"}, nil) ch <- prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, float64(userStatData[idx]), user) } } } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/info_schema_userstats_test.go000066400000000000000000000074011336116511700275150ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeUserStat(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() mock.ExpectQuery(sanitizeQuery(userstatCheckQuery)).WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}). AddRow("userstat", "ON")) columns := []string{"USER", "TOTAL_CONNECTIONS", "CONCURRENT_CONNECTIONS", "CONNECTED_TIME", "BUSY_TIME", "CPU_TIME", "BYTES_RECEIVED", "BYTES_SENT", "BINLOG_BYTES_WRITTEN", "ROWS_READ", "ROWS_SENT", "ROWS_DELETED", "ROWS_INSERTED", "ROWS_UPDATED", "SELECT_COMMANDS", "UPDATE_COMMANDS", "OTHER_COMMANDS", "COMMIT_TRANSACTIONS", "ROLLBACK_TRANSACTIONS", "DENIED_CONNECTIONS", "LOST_CONNECTIONS", "ACCESS_DENIED", "EMPTY_QUERIES"} rows := sqlmock.NewRows(columns). 
AddRow("user_test", 1002, 0, 127027, 286, 245, float64(2565104853), 21090856, float64(2380108042), 767691, 1764, 8778, 1210741, 0, 1764, 1214416, 293, 2430888, 0, 0, 0, 0, 0) mock.ExpectQuery(sanitizeQuery(userStatQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeUserStat{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"user": "user_test"}, value: 1002, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"user": "user_test"}, value: 127027, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 286, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 245, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: float64(2565104853), metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 21090856, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: float64(2380108042), metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 767691, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 1764, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 8778, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 1210741, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 1764, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 1214416, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 293, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 2430888, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema.go000066400000000000000000000001111336116511700243310ustar00rootroot00000000000000package collector // Subsystem. const performanceSchema = "perf_schema" prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_events_statements.go000066400000000000000000000214471336116511700302030ustar00rootroot00000000000000// Scrape `performance_schema.events_statements_summary_by_digest`. 
package collector import ( "database/sql" "fmt" "github.com/prometheus/client_golang/prometheus" "gopkg.in/alecthomas/kingpin.v2" ) const perfEventsStatementsQuery = ` SELECT ifnull(SCHEMA_NAME, 'NONE') as SCHEMA_NAME, DIGEST, LEFT(DIGEST_TEXT, %d) as DIGEST_TEXT, COUNT_STAR, SUM_TIMER_WAIT, SUM_ERRORS, SUM_WARNINGS, SUM_ROWS_AFFECTED, SUM_ROWS_SENT, SUM_ROWS_EXAMINED, SUM_CREATED_TMP_DISK_TABLES, SUM_CREATED_TMP_TABLES, SUM_SORT_MERGE_PASSES, SUM_SORT_ROWS, SUM_NO_INDEX_USED FROM ( SELECT * FROM performance_schema.events_statements_summary_by_digest WHERE SCHEMA_NAME NOT IN ('mysql', 'performance_schema', 'information_schema') AND LAST_SEEN > DATE_SUB(NOW(), INTERVAL %d SECOND) ORDER BY LAST_SEEN DESC )Q GROUP BY Q.SCHEMA_NAME, Q.DIGEST, Q.DIGEST_TEXT, Q.COUNT_STAR, Q.SUM_TIMER_WAIT, Q.SUM_ERRORS, Q.SUM_WARNINGS, Q.SUM_ROWS_AFFECTED, Q.SUM_ROWS_SENT, Q.SUM_ROWS_EXAMINED, Q.SUM_CREATED_TMP_DISK_TABLES, Q.SUM_CREATED_TMP_TABLES, Q.SUM_SORT_MERGE_PASSES, Q.SUM_SORT_ROWS, Q.SUM_NO_INDEX_USED ORDER BY SUM_TIMER_WAIT DESC LIMIT %d ` // Tunable flags. var ( perfEventsStatementsLimit = kingpin.Flag( "collect.perf_schema.eventsstatements.limit", "Limit the number of events statements digests by response time", ).Default("250").Int() perfEventsStatementsTimeLimit = kingpin.Flag( "collect.perf_schema.eventsstatements.timelimit", "Limit how old the 'last_seen' events statements can be, in seconds", ).Default("86400").Int() perfEventsStatementsDigestTextLimit = kingpin.Flag( "collect.perf_schema.eventsstatements.digest_text_limit", "Maximum length of the normalized statement text", ).Default("120").Int() ) // Metric descriptors. var ( performanceSchemaEventsStatementsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_total"), "The total count of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_seconds_total"), "The total time of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsErrorsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_errors_total"), "The errors of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsWarningsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_warnings_total"), "The warnings of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsRowsAffectedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_rows_affected_total"), "The total rows affected of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsRowsSentDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_rows_sent_total"), "The total rows sent of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsRowsExaminedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_rows_examined_total"), "The total rows examined of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsTmpTablesDesc = prometheus.NewDesc( 
prometheus.BuildFQName(namespace, performanceSchema, "events_statements_tmp_tables_total"), "The total tmp tables of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsTmpDiskTablesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_tmp_disk_tables_total"), "The total tmp disk tables of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsSortMergePassesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sort_merge_passes_total"), "The total number of merge passes by the sort algorithm performed by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsSortRowsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sort_rows_total"), "The total number of sorted rows by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsNoIndexUsedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_no_index_used_total"), "The total number of statements that used full table scans by digest.", []string{"schema", "digest", "digest_text"}, nil, ) ) // ScrapePerfEventsStatements collects from `performance_schema.events_statements_summary_by_digest`. type ScrapePerfEventsStatements struct{} // Name of the Scraper. Should be unique. func (ScrapePerfEventsStatements) Name() string { return "perf_schema.eventsstatements" } // Help describes the role of the Scraper. func (ScrapePerfEventsStatements) Help() string { return "Collect metrics from performance_schema.events_statements_summary_by_digest" } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapePerfEventsStatements) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { perfQuery := fmt.Sprintf( perfEventsStatementsQuery, *perfEventsStatementsDigestTextLimit, *perfEventsStatementsTimeLimit, *perfEventsStatementsLimit, ) // Timers here are returned in picoseconds. 
perfSchemaEventsStatementsRows, err := db.Query(perfQuery) if err != nil { return err } defer perfSchemaEventsStatementsRows.Close() var ( schemaName, digest, digestText string count, queryTime, errors, warnings uint64 rowsAffected, rowsSent, rowsExamined uint64 tmpTables, tmpDiskTables uint64 sortMergePasses, sortRows uint64 noIndexUsed uint64 ) for perfSchemaEventsStatementsRows.Next() { if err := perfSchemaEventsStatementsRows.Scan( &schemaName, &digest, &digestText, &count, &queryTime, &errors, &warnings, &rowsAffected, &rowsSent, &rowsExamined, &tmpTables, &tmpDiskTables, &sortMergePasses, &sortRows, &noIndexUsed, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsDesc, prometheus.CounterValue, float64(count), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsTimeDesc, prometheus.CounterValue, float64(queryTime)/picoSeconds, schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsErrorsDesc, prometheus.CounterValue, float64(errors), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsWarningsDesc, prometheus.CounterValue, float64(warnings), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsRowsAffectedDesc, prometheus.CounterValue, float64(rowsAffected), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsRowsSentDesc, prometheus.CounterValue, float64(rowsSent), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsRowsExaminedDesc, prometheus.CounterValue, float64(rowsExamined), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsTmpTablesDesc, prometheus.CounterValue, float64(tmpTables), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsTmpDiskTablesDesc, prometheus.CounterValue, float64(tmpDiskTables), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSortMergePassesDesc, prometheus.CounterValue, float64(sortMergePasses), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSortRowsDesc, prometheus.CounterValue, float64(sortRows), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsNoIndexUsedDesc, prometheus.CounterValue, float64(noIndexUsed), schemaName, digest, digestText, ) } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_events_waits.go000066400000000000000000000041111336116511700271300ustar00rootroot00000000000000// Scrape `performance_schema.events_waits_summary_global_by_event_name`. package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) const perfEventsWaitsQuery = ` SELECT EVENT_NAME, COUNT_STAR, SUM_TIMER_WAIT FROM performance_schema.events_waits_summary_global_by_event_name ` // Metric descriptors. 
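// Every row of the summary table produces one sample per descriptor below,
// labeled with the raw event name, e.g. (illustrative)
// event_name="wait/io/file/innodb/innodb_data_file".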
var ( performanceSchemaEventsWaitsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_waits_total"), "The total events waits by event name.", []string{"event_name"}, nil, ) performanceSchemaEventsWaitsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_waits_seconds_total"), "The total seconds of events waits by event name.", []string{"event_name"}, nil, ) ) // ScrapePerfEventsWaits collects from `performance_schema.events_waits_summary_global_by_event_name`. type ScrapePerfEventsWaits struct{} // Name of the Scraper. Should be unique. func (ScrapePerfEventsWaits) Name() string { return "perf_schema.eventswaits" } // Help describes the role of the Scraper. func (ScrapePerfEventsWaits) Help() string { return "Collect metrics from performance_schema.events_waits_summary_global_by_event_name" } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapePerfEventsWaits) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { // Timers here are returned in picoseconds. perfSchemaEventsWaitsRows, err := db.Query(perfEventsWaitsQuery) if err != nil { return err } defer perfSchemaEventsWaitsRows.Close() var ( eventName string count, time uint64 ) for perfSchemaEventsWaitsRows.Next() { if err := perfSchemaEventsWaitsRows.Scan( &eventName, &count, &time, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaEventsWaitsDesc, prometheus.CounterValue, float64(count), eventName, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsWaitsTimeDesc, prometheus.CounterValue, float64(time)/picoSeconds, eventName, ) } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_file_events.go000066400000000000000000000070251336116511700267270ustar00rootroot00000000000000// Scrape `performance_schema.file_summary_by_event_name`. package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) const perfFileEventsQuery = ` SELECT EVENT_NAME, COUNT_READ, SUM_TIMER_READ, SUM_NUMBER_OF_BYTES_READ, COUNT_WRITE, SUM_TIMER_WRITE, SUM_NUMBER_OF_BYTES_WRITE, COUNT_MISC, SUM_TIMER_MISC FROM performance_schema.file_summary_by_event_name ` // Metric descriptors. var ( performanceSchemaFileEventsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "file_events_total"), "The total file events by event name/mode.", []string{"event_name", "mode"}, nil, ) performanceSchemaFileEventsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "file_events_seconds_total"), "The total seconds of file events by event name/mode.", []string{"event_name", "mode"}, nil, ) performanceSchemaFileEventsBytesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "file_events_bytes_total"), "The total bytes of file events by event name/mode.", []string{"event_name", "mode"}, nil, ) ) // ScrapePerfFileEvents collects from `performance_schema.file_summary_by_event_name`. type ScrapePerfFileEvents struct{} // Name of the Scraper. Should be unique. func (ScrapePerfFileEvents) Name() string { return "perf_schema.file_events" } // Help describes the role of the Scraper. func (ScrapePerfFileEvents) Help() string { return "Collect metrics from performance_schema.file_summary_by_event_name" } // Scrape collects data from database connection and sends it over channel as prometheus metric. 
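// Each table row fans out into read/write/misc samples: three operation counts,
// three timer totals (converted from picoseconds), and two byte totals (the
// "misc" mode has no byte counter in the source table).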
func (ScrapePerfFileEvents) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { // Timers here are returned in picoseconds. perfSchemaFileEventsRows, err := db.Query(perfFileEventsQuery) if err != nil { return err } defer perfSchemaFileEventsRows.Close() var ( eventName string countRead, timeRead, bytesRead uint64 countWrite, timeWrite, bytesWrite uint64 countMisc, timeMisc uint64 ) for perfSchemaFileEventsRows.Next() { if err := perfSchemaFileEventsRows.Scan( &eventName, &countRead, &timeRead, &bytesRead, &countWrite, &timeWrite, &bytesWrite, &countMisc, &timeMisc, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsDesc, prometheus.CounterValue, float64(countRead), eventName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsTimeDesc, prometheus.CounterValue, float64(timeRead)/picoSeconds, eventName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsBytesDesc, prometheus.CounterValue, float64(bytesRead), eventName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsDesc, prometheus.CounterValue, float64(countWrite), eventName, "write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsTimeDesc, prometheus.CounterValue, float64(timeWrite)/picoSeconds, eventName, "write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsBytesDesc, prometheus.CounterValue, float64(bytesWrite), eventName, "write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsDesc, prometheus.CounterValue, float64(countMisc), eventName, "misc", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsTimeDesc, prometheus.CounterValue, float64(timeMisc)/picoSeconds, eventName, "misc", ) } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_file_instances.go000066400000000000000000000065111336116511700274110ustar00rootroot00000000000000// Scrape `performance_schema.file_summary_by_instance`. package collector import ( "database/sql" "strings" "github.com/prometheus/client_golang/prometheus" "gopkg.in/alecthomas/kingpin.v2" ) const perfFileInstancesQuery = ` SELECT FILE_NAME, EVENT_NAME, COUNT_READ, COUNT_WRITE, SUM_NUMBER_OF_BYTES_READ, SUM_NUMBER_OF_BYTES_WRITE FROM performance_schema.file_summary_by_instance where FILE_NAME REGEXP ? ` // Tunable flags. var ( performanceSchemaFileInstancesFilter = kingpin.Flag( "collect.perf_schema.file_instances.filter", "RegEx file_name filter for performance_schema.file_summary_by_instance", ).Default(".*").String() ) // Metric descriptors. var ( performanceSchemaFileInstancesRemovePrefix = kingpin.Flag( "collect.perf_schema.file_instances.remove_prefix", "Remove path prefix in performance_schema.file_summary_by_instance", ).Default("/var/lib/mysql/").String() performanceSchemaFileInstancesBytesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "file_instances_bytes"), "The number of bytes processed by file read/write operations.", []string{"file_name", "event_name", "mode"}, nil, ) performanceSchemaFileInstancesCountDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "file_instances_total"), "The total number of file read/write operations.", []string{"file_name", "event_name", "mode"}, nil, ) ) // ScrapePerfFileInstances collects from `performance_schema.file_summary_by_instance`. type ScrapePerfFileInstances struct{} // Name of the Scraper. Should be unique. 
func (ScrapePerfFileInstances) Name() string { return "perf_schema.file_instances" } // Help describes the role of the Scraper. func (ScrapePerfFileInstances) Help() string { return "Collect metrics from performance_schema.file_summary_by_instance" } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapePerfFileInstances) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { // Timers here are returned in picoseconds. perfSchemaFileInstancesRows, err := db.Query(perfFileInstancesQuery, *performanceSchemaFileInstancesFilter) if err != nil { return err } defer perfSchemaFileInstancesRows.Close() var ( fileName, eventName string countRead, countWrite uint64 sumBytesRead, sumBytesWritten uint64 ) for perfSchemaFileInstancesRows.Next() { if err := perfSchemaFileInstancesRows.Scan( &fileName, &eventName, &countRead, &countWrite, &sumBytesRead, &sumBytesWritten, ); err != nil { return err } fileName = strings.TrimPrefix(fileName, *performanceSchemaFileInstancesRemovePrefix) ch <- prometheus.MustNewConstMetric( performanceSchemaFileInstancesCountDesc, prometheus.CounterValue, float64(countRead), fileName, eventName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileInstancesCountDesc, prometheus.CounterValue, float64(countWrite), fileName, eventName, "write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileInstancesBytesDesc, prometheus.CounterValue, float64(sumBytesRead), fileName, eventName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileInstancesBytesDesc, prometheus.CounterValue, float64(sumBytesWritten), fileName, eventName, "write", ) } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_file_instances_test.go000066400000000000000000000062251336116511700304520ustar00rootroot00000000000000package collector import ( "fmt" "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" "gopkg.in/alecthomas/kingpin.v2" ) func TestScrapePerfFileInstances(t *testing.T) { _, err := kingpin.CommandLine.Parse([]string{"--collect.perf_schema.file_instances.filter", ""}) if err != nil { t.Fatal(err) } db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"FILE_NAME", "EVENT_NAME", "COUNT_READ", "COUNT_WRITE", "SUM_NUMBER_OF_BYTES_READ", "SUM_NUMBER_OF_BYTES_WRITE"} rows := sqlmock.NewRows(columns). AddRow("/var/lib/mysql/db1/file", "event1", "3", "4", "725", "128"). AddRow("/var/lib/mysql/db2/file", "event2", "23", "12", "3123", "967"). 
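// The third fixture row below deliberately lacks the default /var/lib/mysql/ prefix, so the TrimPrefix call in Scrape (driven by --collect.perf_schema.file_instances.remove_prefix) leaves its file_name unchanged.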
AddRow("db3/file", "event3", "45", "32", "1337", "326") mock.ExpectQuery(sanitizeQuery(perfFileInstancesQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapePerfFileInstances{}).Scrape(db, ch); err != nil { panic(fmt.Sprintf("error calling function on test: %s", err)) } close(ch) }() metricExpected := []MetricResult{ {labels: labelMap{"file_name": "db1/file", "event_name": "event1", "mode": "read"}, value: 3, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"file_name": "db1/file", "event_name": "event1", "mode": "write"}, value: 4, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"file_name": "db1/file", "event_name": "event1", "mode": "read"}, value: 725, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"file_name": "db1/file", "event_name": "event1", "mode": "write"}, value: 128, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"file_name": "db2/file", "event_name": "event2", "mode": "read"}, value: 23, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"file_name": "db2/file", "event_name": "event2", "mode": "write"}, value: 12, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"file_name": "db2/file", "event_name": "event2", "mode": "read"}, value: 3123, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"file_name": "db2/file", "event_name": "event2", "mode": "write"}, value: 967, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"file_name": "db3/file", "event_name": "event3", "mode": "read"}, value: 45, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"file_name": "db3/file", "event_name": "event3", "mode": "write"}, value: 32, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"file_name": "db3/file", "event_name": "event3", "mode": "read"}, value: 1337, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"file_name": "db3/file", "event_name": "event3", "mode": "write"}, value: 326, metricType: dto.MetricType_COUNTER}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range metricExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_index_io_waits.go000066400000000000000000000076761336116511700274450ustar00rootroot00000000000000// Scrape `performance_schema.table_io_waits_summary_by_index_usage`. package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) const perfIndexIOWaitsQuery = ` SELECT OBJECT_SCHEMA, OBJECT_NAME, ifnull(INDEX_NAME, 'NONE') as INDEX_NAME, COUNT_FETCH, COUNT_INSERT, COUNT_UPDATE, COUNT_DELETE, SUM_TIMER_FETCH, SUM_TIMER_INSERT, SUM_TIMER_UPDATE, SUM_TIMER_DELETE FROM performance_schema.table_io_waits_summary_by_index_usage WHERE OBJECT_SCHEMA NOT IN ('mysql', 'performance_schema') ` // Metric descriptors. 
var ( performanceSchemaIndexWaitsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "index_io_waits_total"), "The total number of index I/O wait events for each index and operation.", []string{"schema", "name", "index", "operation"}, nil, ) performanceSchemaIndexWaitsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "index_io_waits_seconds_total"), "The total time of index I/O wait events for each index and operation.", []string{"schema", "name", "index", "operation"}, nil, ) ) // ScrapePerfIndexIOWaits collects from `performance_schema.table_io_waits_summary_by_index_usage`. type ScrapePerfIndexIOWaits struct{} // Name of the Scraper. Should be unique. func (ScrapePerfIndexIOWaits) Name() string { return "perf_schema.indexiowaits" } // Help describes the role of the Scraper. func (ScrapePerfIndexIOWaits) Help() string { return "Collect metrics from performance_schema.table_io_waits_summary_by_index_usage" } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapePerfIndexIOWaits) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { perfSchemaIndexWaitsRows, err := db.Query(perfIndexIOWaitsQuery) if err != nil { return err } defer perfSchemaIndexWaitsRows.Close() var ( objectSchema, objectName, indexName string countFetch, countInsert, countUpdate, countDelete uint64 timeFetch, timeInsert, timeUpdate, timeDelete uint64 ) for perfSchemaIndexWaitsRows.Next() { if err := perfSchemaIndexWaitsRows.Scan( &objectSchema, &objectName, &indexName, &countFetch, &countInsert, &countUpdate, &countDelete, &timeFetch, &timeInsert, &timeUpdate, &timeDelete, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsDesc, prometheus.CounterValue, float64(countFetch), objectSchema, objectName, indexName, "fetch", ) // We only include the insert column when indexName is NONE. if indexName == "NONE" { ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsDesc, prometheus.CounterValue, float64(countInsert), objectSchema, objectName, indexName, "insert", ) } ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsDesc, prometheus.CounterValue, float64(countUpdate), objectSchema, objectName, indexName, "update", ) ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsDesc, prometheus.CounterValue, float64(countDelete), objectSchema, objectName, indexName, "delete", ) ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsTimeDesc, prometheus.CounterValue, float64(timeFetch)/picoSeconds, objectSchema, objectName, indexName, "fetch", ) // As with the count above, the insert timer is only included when indexName is NONE.
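// Illustrative example, mirroring the test fixtures in this package: a NONE row with COUNT_INSERT=21 and SUM_TIMER_INSERT=25000000000000 ps yields mysql_perf_schema_index_io_waits_total{operation="insert"} 21 and mysql_perf_schema_index_io_waits_seconds_total{operation="insert"} 25 (schema/name/index labels elided here).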
if indexName == "NONE" { ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsTimeDesc, prometheus.CounterValue, float64(timeInsert)/picoSeconds, objectSchema, objectName, indexName, "insert", ) } ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsTimeDesc, prometheus.CounterValue, float64(timeUpdate)/picoSeconds, objectSchema, objectName, indexName, "update", ) ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsTimeDesc, prometheus.CounterValue, float64(timeDelete)/picoSeconds, objectSchema, objectName, indexName, "delete", ) } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_index_io_waits_test.go000066400000000000000000000071221336116511700304660ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapePerfIndexIOWaits(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"OBJECT_SCHEMA", "OBJECT_NAME", "INDEX_NAME", "COUNT_FETCH", "COUNT_INSERT", "COUNT_UPDATE", "COUNT_DELETE", "SUM_TIMER_FETCH", "SUM_TIMER_INSERT", "SUM_TIMER_UPDATE", "SUM_TIMER_DELETE"} rows := sqlmock.NewRows(columns). // Note, timers are in picoseconds. AddRow("database", "table", "index", "10", "11", "12", "13", "14000000000000", "15000000000000", "16000000000000", "17000000000000"). AddRow("database", "table", "NONE", "20", "21", "22", "23", "24000000000000", "25000000000000", "26000000000000", "27000000000000") mock.ExpectQuery(sanitizeQuery(perfIndexIOWaitsQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapePerfIndexIOWaits{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() metricExpected := []MetricResult{ {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "fetch"}, value: 10, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "update"}, value: 12, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "delete"}, value: 13, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "fetch"}, value: 14, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "update"}, value: 16, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "delete"}, value: 17, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "fetch"}, value: 20, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "insert"}, value: 21, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "update"}, value: 22, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "delete"}, value: 23, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "fetch"}, value: 24, metricType: 
dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "insert"}, value: 25, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "update"}, value: 26, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "delete"}, value: 27, metricType: dto.MetricType_COUNTER}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range metricExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_replication_group_member_stats.go000066400000000000000000000072611336116511700327200ustar00rootroot00000000000000package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) const perfReplicationGroupMemberStatsQuery = ` SELECT MEMBER_ID, COUNT_TRANSACTIONS_IN_QUEUE, COUNT_TRANSACTIONS_CHECKED, COUNT_CONFLICTS_DETECTED, COUNT_TRANSACTIONS_ROWS_VALIDATING FROM performance_schema.replication_group_member_stats ` // Metric descriptors. var ( performanceSchemaReplicationGroupMemberStatsTransInQueueDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "transaction_in_queue"), "The number of transactions in the queue pending conflict detection checks. Once the "+ "transactions have been checked for conflicts, if they pass the check, they are queued to be applied as well.", []string{"member_id"}, nil, ) performanceSchemaReplicationGroupMemberStatsTransCheckedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "transaction_checked"), "The number of transactions that have been checked for conflicts.", []string{"member_id"}, nil, ) performanceSchemaReplicationGroupMemberStatsConflictsDetectedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "conflicts_detected"), "The number of transactions that did not pass the conflict detection check.", []string{"member_id"}, nil, ) performanceSchemaReplicationGroupMemberStatsTransRowValidatingDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "transaction_rows_validating"), "The current size of the conflict detection database (against which each transaction is certified).", []string{"member_id"}, nil, ) ) // ScrapePerfReplicationGroupMemberStats collects from `performance_schema.replication_group_member_stats`. type ScrapePerfReplicationGroupMemberStats struct{} // Name of the Scraper. Should be unique. func (ScrapePerfReplicationGroupMemberStats) Name() string { return performanceSchema + ".replication_group_member_stats" } // Help describes the role of the Scraper. func (ScrapePerfReplicationGroupMemberStats) Help() string { return "Collect metrics from performance_schema.replication_group_member_stats" } // Scrape collects data from database connection and sends it over channel as prometheus metric.
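// Sample exported series (illustrative only): mysql_perf_schema_transaction_in_queue{member_id="00000000-0000-0000-0000-000000000000"} 0 — the mysql namespace and perf_schema subsystem come from the BuildFQName calls above.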
func (ScrapePerfReplicationGroupMemberStats) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { perfReplicationGroupMemberStatsRows, err := db.Query(perfReplicationGroupMemberStatsQuery) if err != nil { return err } defer perfReplicationGroupMemberStatsRows.Close() var ( memberId string countTransactionsInQueue, countTransactionsChecked uint64 countConflictsDetected, countTransactionsRowsValidating uint64 ) for perfReplicationGroupMemberStatsRows.Next() { if err := perfReplicationGroupMemberStatsRows.Scan( &memberId, &countTransactionsInQueue, &countTransactionsChecked, &countConflictsDetected, &countTransactionsRowsValidating, ); err != nil { return err } // Queue size and conflict-database size are instantaneous values, hence gauges. ch <- prometheus.MustNewConstMetric( performanceSchemaReplicationGroupMemberStatsTransInQueueDesc, prometheus.GaugeValue, float64(countTransactionsInQueue), memberId, ) ch <- prometheus.MustNewConstMetric( performanceSchemaReplicationGroupMemberStatsTransCheckedDesc, prometheus.CounterValue, float64(countTransactionsChecked), memberId, ) ch <- prometheus.MustNewConstMetric( performanceSchemaReplicationGroupMemberStatsConflictsDetectedDesc, prometheus.CounterValue, float64(countConflictsDetected), memberId, ) ch <- prometheus.MustNewConstMetric( performanceSchemaReplicationGroupMemberStatsTransRowValidatingDesc, prometheus.GaugeValue, float64(countTransactionsRowsValidating), memberId, ) } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_table_io_waits.go000066400000000000000000000071121336116511700274060ustar00rootroot00000000000000// Scrape `performance_schema.table_io_waits_summary_by_table`. package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) const perfTableIOWaitsQuery = ` SELECT OBJECT_SCHEMA, OBJECT_NAME, COUNT_FETCH, COUNT_INSERT, COUNT_UPDATE, COUNT_DELETE, SUM_TIMER_FETCH, SUM_TIMER_INSERT, SUM_TIMER_UPDATE, SUM_TIMER_DELETE FROM performance_schema.table_io_waits_summary_by_table WHERE OBJECT_SCHEMA NOT IN ('mysql', 'performance_schema') ` // Metric descriptors. var ( performanceSchemaTableWaitsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "table_io_waits_total"), "The total number of table I/O wait events for each table and operation.", []string{"schema", "name", "operation"}, nil, ) performanceSchemaTableWaitsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "table_io_waits_seconds_total"), "The total time of table I/O wait events for each table and operation.", []string{"schema", "name", "operation"}, nil, ) ) // ScrapePerfTableIOWaits collects from `performance_schema.table_io_waits_summary_by_table`. type ScrapePerfTableIOWaits struct{} // Name of the Scraper. Should be unique. func (ScrapePerfTableIOWaits) Name() string { return "perf_schema.tableiowaits" } // Help describes the role of the Scraper. func (ScrapePerfTableIOWaits) Help() string { return "Collect metrics from performance_schema.table_io_waits_summary_by_table" } // Scrape collects data from database connection and sends it over channel as prometheus metric.
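// Only user tables emit series: the query above filters out the mysql and performance_schema system schemas to keep self-monitoring noise out of the exported metrics.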
func (ScrapePerfTableIOWaits) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { perfSchemaTableWaitsRows, err := db.Query(perfTableIOWaitsQuery) if err != nil { return err } defer perfSchemaTableWaitsRows.Close() var ( objectSchema, objectName string countFetch, countInsert, countUpdate, countDelete uint64 timeFetch, timeInsert, timeUpdate, timeDelete uint64 ) for perfSchemaTableWaitsRows.Next() { if err := perfSchemaTableWaitsRows.Scan( &objectSchema, &objectName, &countFetch, &countInsert, &countUpdate, &countDelete, &timeFetch, &timeInsert, &timeUpdate, &timeDelete, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsDesc, prometheus.CounterValue, float64(countFetch), objectSchema, objectName, "fetch", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsDesc, prometheus.CounterValue, float64(countInsert), objectSchema, objectName, "insert", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsDesc, prometheus.CounterValue, float64(countUpdate), objectSchema, objectName, "update", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsDesc, prometheus.CounterValue, float64(countDelete), objectSchema, objectName, "delete", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsTimeDesc, prometheus.CounterValue, float64(timeFetch)/picoSeconds, objectSchema, objectName, "fetch", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsTimeDesc, prometheus.CounterValue, float64(timeInsert)/picoSeconds, objectSchema, objectName, "insert", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsTimeDesc, prometheus.CounterValue, float64(timeUpdate)/picoSeconds, objectSchema, objectName, "update", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsTimeDesc, prometheus.CounterValue, float64(timeDelete)/picoSeconds, objectSchema, objectName, "delete", ) } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/perf_schema_table_lock_waits.go000066400000000000000000000202741336116511700277330ustar00rootroot00000000000000// Scrape `performance_schema.table_lock_waits_summary_by_table`. package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) const perfTableLockWaitsQuery = ` SELECT OBJECT_SCHEMA, OBJECT_NAME, COUNT_READ_NORMAL, COUNT_READ_WITH_SHARED_LOCKS, COUNT_READ_HIGH_PRIORITY, COUNT_READ_NO_INSERT, COUNT_READ_EXTERNAL, COUNT_WRITE_ALLOW_WRITE, COUNT_WRITE_CONCURRENT_INSERT, COUNT_WRITE_LOW_PRIORITY, COUNT_WRITE_NORMAL, COUNT_WRITE_EXTERNAL, SUM_TIMER_READ_NORMAL, SUM_TIMER_READ_WITH_SHARED_LOCKS, SUM_TIMER_READ_HIGH_PRIORITY, SUM_TIMER_READ_NO_INSERT, SUM_TIMER_READ_EXTERNAL, SUM_TIMER_WRITE_ALLOW_WRITE, SUM_TIMER_WRITE_CONCURRENT_INSERT, SUM_TIMER_WRITE_LOW_PRIORITY, SUM_TIMER_WRITE_NORMAL, SUM_TIMER_WRITE_EXTERNAL FROM performance_schema.table_lock_waits_summary_by_table WHERE OBJECT_SCHEMA NOT IN ('mysql', 'performance_schema', 'information_schema') ` // Metric descriptors. 
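// Four descriptor families follow: sql_lock_waits_total / sql_lock_waits_seconds_total for server-level (SQL) lock requests, and external_lock_waits_total / external_lock_waits_seconds_total for locks delegated to the storage engine; the *_EXTERNAL columns in the query feed the external pair.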
var ( performanceSchemaSQLTableLockWaitsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "sql_lock_waits_total"), "The total number of SQL lock wait events for each table and operation.", []string{"schema", "name", "operation"}, nil, ) performanceSchemaExternalTableLockWaitsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "external_lock_waits_total"), "The total number of external lock wait events for each table and operation.", []string{"schema", "name", "operation"}, nil, ) performanceSchemaSQLTableLockWaitsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "sql_lock_waits_seconds_total"), "The total time of SQL lock wait events for each table and operation.", []string{"schema", "name", "operation"}, nil, ) performanceSchemaExternalTableLockWaitsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "external_lock_waits_seconds_total"), "The total time of external lock wait events for each table and operation.", []string{"schema", "name", "operation"}, nil, ) ) // ScrapePerfTableLockWaits collects from `performance_schema.table_lock_waits_summary_by_table`. type ScrapePerfTableLockWaits struct{} // Name of the Scraper. Should be unique. func (ScrapePerfTableLockWaits) Name() string { return "perf_schema.tablelocks" } // Help describes the role of the Scraper. func (ScrapePerfTableLockWaits) Help() string { return "Collect metrics from performance_schema.table_lock_waits_summary_by_table" } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapePerfTableLockWaits) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { perfSchemaTableLockWaitsRows, err := db.Query(perfTableLockWaitsQuery) if err != nil { return err } defer perfSchemaTableLockWaitsRows.Close() var ( objectSchema string objectName string countReadNormal uint64 countReadWithSharedLocks uint64 countReadHighPriority uint64 countReadNoInsert uint64 countReadExternal uint64 countWriteAllowWrite uint64 countWriteConcurrentInsert uint64 countWriteLowPriority uint64 countWriteNormal uint64 countWriteExternal uint64 timeReadNormal uint64 timeReadWithSharedLocks uint64 timeReadHighPriority uint64 timeReadNoInsert uint64 timeReadExternal uint64 timeWriteAllowWrite uint64 timeWriteConcurrentInsert uint64 timeWriteLowPriority uint64 timeWriteNormal uint64 timeWriteExternal uint64 ) for perfSchemaTableLockWaitsRows.Next() { if err := perfSchemaTableLockWaitsRows.Scan( &objectSchema, &objectName, &countReadNormal, &countReadWithSharedLocks, &countReadHighPriority, &countReadNoInsert, &countReadExternal, &countWriteAllowWrite, &countWriteConcurrentInsert, &countWriteLowPriority, &countWriteNormal, &countWriteExternal, &timeReadNormal, &timeReadWithSharedLocks, &timeReadHighPriority, &timeReadNoInsert, &timeReadExternal, &timeWriteAllowWrite, &timeWriteConcurrentInsert, &timeWriteLowPriority, &timeWriteNormal, &timeWriteExternal, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countReadNormal), objectSchema, objectName, "read_normal", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countReadWithSharedLocks), objectSchema, objectName, "read_with_shared_locks", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countReadHighPriority), objectSchema, 
objectName, "read_high_priority", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countReadNoInsert), objectSchema, objectName, "read_no_insert", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countWriteNormal), objectSchema, objectName, "write_normal", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countWriteAllowWrite), objectSchema, objectName, "write_allow_write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countWriteConcurrentInsert), objectSchema, objectName, "write_concurrent_insert", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countWriteLowPriority), objectSchema, objectName, "write_low_priority", ) ch <- prometheus.MustNewConstMetric( performanceSchemaExternalTableLockWaitsDesc, prometheus.CounterValue, float64(countReadExternal), objectSchema, objectName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaExternalTableLockWaitsDesc, prometheus.CounterValue, float64(countWriteExternal), objectSchema, objectName, "write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeReadNormal)/picoSeconds, objectSchema, objectName, "read_normal", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeReadWithSharedLocks)/picoSeconds, objectSchema, objectName, "read_with_shared_locks", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeReadHighPriority)/picoSeconds, objectSchema, objectName, "read_high_priority", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeReadNoInsert)/picoSeconds, objectSchema, objectName, "read_no_insert", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeWriteNormal)/picoSeconds, objectSchema, objectName, "write_normal", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeWriteAllowWrite)/picoSeconds, objectSchema, objectName, "write_allow_write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeWriteConcurrentInsert)/picoSeconds, objectSchema, objectName, "write_concurrent_insert", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeWriteLowPriority)/picoSeconds, objectSchema, objectName, "write_low_priority", ) ch <- prometheus.MustNewConstMetric( performanceSchemaExternalTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeReadExternal)/picoSeconds, objectSchema, objectName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaExternalTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeWriteExternal)/picoSeconds, objectSchema, objectName, "write", ) } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/scraper.go000066400000000000000000000011031336116511700235160ustar00rootroot00000000000000package collector import ( "database/sql" _ "github.com/go-sql-driver/mysql" "github.com/prometheus/client_golang/prometheus" ) // Scraper is minimal interface that 
lets you add new prometheus metrics to mysqld_exporter. type Scraper interface { // Name of the Scraper. Should be unique. Name() string // Help describes the role of the Scraper. // Example: "Collect from SHOW ENGINE INNODB STATUS" Help() string // Scrape collects data from database connection and sends it over channel as prometheus metric. Scrape(db *sql.DB, ch chan<- prometheus.Metric) error } prometheus-mysqld-exporter-0.11.0+ds/collector/slave_hosts.go000066400000000000000000000050601336116511700244170ustar00rootroot00000000000000// Scrape `SHOW SLAVE HOSTS`. package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" "github.com/satori/go.uuid" ) const ( // slavehosts is the name of this Scraper. slavehosts = "slave_hosts" // slaveHostsQuery is the query used to list the replicas currently // registered with this server. slaveHostsQuery = "SHOW SLAVE HOSTS" ) // Metric descriptors. var ( SlaveHostsInfo = prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "slave_hosts_info"), "Information about running slaves", []string{"server_id", "slave_host", "port", "master_id", "slave_uuid"}, nil, ) ) // ScrapeSlaveHosts scrapes metrics about the replicating slaves. type ScrapeSlaveHosts struct{} // Name of the Scraper. Should be unique. func (ScrapeSlaveHosts) Name() string { return slavehosts } // Help describes the role of the Scraper. func (ScrapeSlaveHosts) Help() string { return "Scrape information from 'SHOW SLAVE HOSTS'" } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeSlaveHosts) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { slaveHostsRows, err := db.Query(slaveHostsQuery) if err != nil { return err } defer slaveHostsRows.Close() // fields of row var serverId string var host string var port string var rrrOrMasterId string var slaveUuidOrMasterId string // Depends on the version of MySQL being scraped var masterId string var slaveUuid string for slaveHostsRows.Next() { // Newer versions of mysql have the following // Server_id, Host, Port, Master_id, Slave_UUID // Older versions of mysql have the following // Server_id, Host, Port, Rpl_recovery_rank, Master_id err := slaveHostsRows.Scan(&serverId, &host, &port, &rrrOrMasterId, &slaveUuidOrMasterId) if err != nil { return err } // Check to see if slaveUuidOrMasterId resembles a UUID or not // to find out if we are using an old version of MySQL if _, err = uuid.FromString(slaveUuidOrMasterId); err != nil { // We are running an older version of MySQL with no slave UUID slaveUuid = "" masterId = slaveUuidOrMasterId } else { // We are running a more recent version of MySQL slaveUuid = slaveUuidOrMasterId masterId = rrrOrMasterId } ch <- prometheus.MustNewConstMetric( SlaveHostsInfo, prometheus.GaugeValue, 1, serverId, host, port, masterId, slaveUuid, ) } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/slave_hosts_test.go000066400000000000000000000061751336116511700254660ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeSlaveHostsOldFormat(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close()
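// The columns below follow the pre-MySQL-5.5 SHOW SLAVE HOSTS layout (a Rpl_recovery_rank column and no Slave_UUID); the non-UUID final column is what sends ScrapeSlaveHosts down its old-version branch.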
columns := []string{"Server_id", "Host", "Port", "Rpl_recovery_rank", "Master_id"} rows := sqlmock.NewRows(columns). AddRow("380239978", "backup_server_1", "0", "1", "192168011"). AddRow("11882498", "backup_server_2", "0", "1", "192168011") mock.ExpectQuery(sanitizeQuery("SHOW SLAVE HOSTS")).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeSlaveHosts{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{"server_id": "380239978", "slave_host": "backup_server_1", "port": "0", "master_id": "192168011", "slave_uuid": ""}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"server_id": "11882498", "slave_host": "backup_server_2", "port": "0", "master_id": "192168011", "slave_uuid": ""}, value: 1, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } func TestScrapeSlaveHostsNewFormat(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"Server_id", "Host", "Port", "Master_id", "Slave_UUID"} rows := sqlmock.NewRows(columns). AddRow("192168010", "iconnect2", "3306", "192168011", "14cb6624-7f93-11e0-b2c0-c80aa9429562"). AddRow("1921680101", "athena", "3306", "192168011", "07af4990-f41f-11df-a566-7ac56fdaf645") mock.ExpectQuery(sanitizeQuery("SHOW SLAVE HOSTS")).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeSlaveHosts{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{"server_id": "192168010", "slave_host": "iconnect2", "port": "3306", "master_id": "192168011", "slave_uuid": "14cb6624-7f93-11e0-b2c0-c80aa9429562"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"server_id": "1921680101", "slave_host": "athena", "port": "3306", "master_id": "192168011", "slave_uuid": "07af4990-f41f-11df-a566-7ac56fdaf645"}, value: 1, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } prometheus-mysqld-exporter-0.11.0+ds/collector/slave_status.go000066400000000000000000000061301336116511700246010ustar00rootroot00000000000000// Scrape `SHOW SLAVE STATUS`. package collector import ( "database/sql" "fmt" "strings" "github.com/prometheus/client_golang/prometheus" ) const ( // Subsystem. 
slaveStatus = "slave_status" ) var slaveStatusQueries = [2]string{"SHOW ALL SLAVES STATUS", "SHOW SLAVE STATUS"} var slaveStatusQuerySuffixes = [3]string{" NONBLOCKING", " NOLOCK", ""} func columnIndex(slaveCols []string, colName string) int { for idx := range slaveCols { if slaveCols[idx] == colName { return idx } } return -1 } func columnValue(scanArgs []interface{}, slaveCols []string, colName string) string { var columnIndex = columnIndex(slaveCols, colName) if columnIndex == -1 { return "" } return string(*scanArgs[columnIndex].(*sql.RawBytes)) } // ScrapeSlaveStatus collects from `SHOW SLAVE STATUS`. type ScrapeSlaveStatus struct{} // Name of the Scraper. Should be unique. func (ScrapeSlaveStatus) Name() string { return slaveStatus } // Help describes the role of the Scraper. func (ScrapeSlaveStatus) Help() string { return "Collect from SHOW SLAVE STATUS" } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeSlaveStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error { var ( slaveStatusRows *sql.Rows err error ) // Try the both syntax for MySQL/Percona and MariaDB for _, query := range slaveStatusQueries { slaveStatusRows, err = db.Query(query) if err != nil { // MySQL/Percona // Leverage lock-free SHOW SLAVE STATUS by guessing the right suffix for _, suffix := range slaveStatusQuerySuffixes { slaveStatusRows, err = db.Query(fmt.Sprint(query, suffix)) if err == nil { break } } } else { // MariaDB break } } if err != nil { return err } defer slaveStatusRows.Close() slaveCols, err := slaveStatusRows.Columns() if err != nil { return err } for slaveStatusRows.Next() { // As the number of columns varies with mysqld versions, // and sql.Scan requires []interface{}, we need to create a // slice of pointers to the elements of slaveData. scanArgs := make([]interface{}, len(slaveCols)) for i := range scanArgs { scanArgs[i] = &sql.RawBytes{} } if err := slaveStatusRows.Scan(scanArgs...); err != nil { return err } masterUUID := columnValue(scanArgs, slaveCols, "Master_UUID") masterHost := columnValue(scanArgs, slaveCols, "Master_Host") channelName := columnValue(scanArgs, slaveCols, "Channel_Name") // MySQL & Percona connectionName := columnValue(scanArgs, slaveCols, "Connection_name") // MariaDB for i, col := range slaveCols { if value, ok := parseStatus(*scanArgs[i].(*sql.RawBytes)); ok { // Silently skip unparsable values. ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, slaveStatus, strings.ToLower(col)), "Generic metric from SHOW SLAVE STATUS.", []string{"master_host", "master_uuid", "channel_name", "connection_name"}, nil, ), prometheus.UntypedValue, value, masterHost, masterUUID, channelName, connectionName, ) } } } return nil } prometheus-mysqld-exporter-0.11.0+ds/collector/slave_status_test.go000066400000000000000000000035231336116511700256430ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeSlaveStatus(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"Master_Host", "Read_Master_Log_Pos", "Slave_IO_Running", "Slave_SQL_Running", "Seconds_Behind_Master"} rows := sqlmock.NewRows(columns). 
AddRow("127.0.0.1", "1", "Connecting", "Yes", "2") mock.ExpectQuery(sanitizeQuery("SHOW SLAVE STATUS")).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeSlaveStatus{}).Scrape(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{"channel_name": "", "connection_name": "", "master_host": "127.0.0.1", "master_uuid": ""}, value: 1, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{"channel_name": "", "connection_name": "", "master_host": "127.0.0.1", "master_uuid": ""}, value: 0, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{"channel_name": "", "connection_name": "", "master_host": "127.0.0.1", "master_uuid": ""}, value: 1, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{"channel_name": "", "connection_name": "", "master_host": "127.0.0.1", "master_uuid": ""}, value: 2, metricType: dto.MetricType_UNTYPED}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } prometheus-mysqld-exporter-0.11.0+ds/docker-compose.yml000066400000000000000000000003451336116511700232060ustar00rootroot00000000000000# see CONTRIBUTING.md --- version: '3' services: mysql: image: ${MYSQL_IMAGE:-mysql/mysql-server:5.7} environment: - MYSQL_ALLOW_EMPTY_PASSWORD=yes - MYSQL_ROOT_HOST=% ports: - 127.0.0.1:3306:3306 prometheus-mysqld-exporter-0.11.0+ds/example.rules000066400000000000000000000062431336116511700222630ustar00rootroot00000000000000### # Sample prometheus rules/alerts for mysqld. # # NOTE: Please review these carefully as thresholds and behavior may not meet # your SLOs or labels. # ### # Recording Rules # Record slave lag seconds for pre-computed timeseries that takes # `mysql_slave_status_sql_delay` into account mysql_slave_lag_seconds = mysql_slave_status_seconds_behind_master - mysql_slave_status_sql_delay # Record slave lag via heartbeat method mysql_heartbeat_lag_seconds = mysql_heartbeat_now_timestamp_seconds - mysql_heartbeat_stored_timestamp_seconds # Record "Transactions per second" # See: https://dev.mysql.com/doc/refman/5.7/en/glossary.html#glos_transaction job:mysql_transactions:rate5m = sum(rate(mysql_global_status_commands_total{command=~"(commit|rollback)"}[5m])) without (command) ### # Galera Alerts groups: - name: GaleraAlerts rules: - alert: MySQLGaleraNotReady expr: mysql_global_status_wsrep_ready != 1 for: 5m labels: severity: warning annotations: description: '{{$labels.job}} on {{$labels.instance}} is not ready.' summary: Galera cluster node not ready - alert: MySQLGaleraOutOfSync expr: (mysql_global_status_wsrep_local_state != 4 and mysql_global_variables_wsrep_desync == 0) for: 5m labels: severity: warning annotations: description: '{{$labels.job}} on {{$labels.instance}} is not in sync ({{$value}} != 4).' summary: Galera cluster node out of sync - alert: MySQLGaleraDonorFallingBehind expr: (mysql_global_status_wsrep_local_state == 2 and mysql_global_status_wsrep_local_recv_queue > 100) for: 5m labels: severity: warning annotations: description: '{{$labels.job}} on {{$labels.instance}} is a donor (hotbackup) and is falling behind (queue size {{$value}}).' 
summary: xtradb cluster donor node falling behind - alert: MySQLReplicationNotRunning expr: mysql_slave_status_slave_io_running == 0 or mysql_slave_status_slave_sql_running == 0 for: 2m labels: severity: critical annotations: description: Slave replication (IO or SQL) has been down for more than 2 minutes. summary: Slave replication is not running - alert: MySQLReplicationLag expr: (mysql_slave_lag_seconds > 30) and on(instance) (predict_linear(mysql_slave_lag_seconds[5m], 60 * 2) > 0) for: 1m labels: severity: critical annotations: description: The mysql slave replication has fallen behind and is not recovering summary: MySQL slave replication is lagging - alert: MySQLReplicationLag expr: (mysql_heartbeat_lag_seconds > 30) and on(instance) (predict_linear(mysql_heartbeat_lag_seconds[5m], 60 * 2) > 0) for: 1m labels: severity: critical annotations: description: The mysql slave replication has fallen behind and is not recovering summary: MySQL slave replication is lagging - alert: MySQLInnoDBLogWaits expr: rate(mysql_global_status_innodb_log_waits[15m]) > 10 labels: severity: warning annotations: description: The innodb logs are waiting for disk at a rate of {{$value}} / second summary: MySQL innodb log writes stalling prometheus-mysqld-exporter-0.11.0+ds/example.rules.yml000066400000000000000000000054151336116511700230630ustar00rootroot00000000000000groups: - name: example.rules rules: - record: mysql_slave_lag_seconds expr: mysql_slave_status_seconds_behind_master - mysql_slave_status_sql_delay - record: mysql_heartbeat_lag_seconds expr: mysql_heartbeat_now_timestamp_seconds - mysql_heartbeat_stored_timestamp_seconds - record: job:mysql_transactions:rate5m expr: sum(rate(mysql_global_status_commands_total{command=~"(commit|rollback)"}[5m])) WITHOUT (command) - alert: MySQLGaleraNotReady expr: mysql_global_status_wsrep_ready != 1 for: 5m labels: severity: warning annotations: description: '{{$labels.job}} on {{$labels.instance}} is not ready.' summary: Galera cluster node not ready - alert: MySQLGaleraOutOfSync expr: (mysql_global_status_wsrep_local_state != 4 and mysql_global_variables_wsrep_desync == 0) for: 5m labels: severity: warning annotations: description: '{{$labels.job}} on {{$labels.instance}} is not in sync ({{$value}} != 4).' summary: Galera cluster node out of sync - alert: MySQLGaleraDonorFallingBehind expr: (mysql_global_status_wsrep_local_state == 2 and mysql_global_status_wsrep_local_recv_queue > 100) for: 5m labels: severity: warning annotations: description: '{{$labels.job}} on {{$labels.instance}} is a donor (hotbackup) and is falling behind (queue size {{$value}}).' summary: xtradb cluster donor node falling behind - alert: MySQLReplicationNotRunning expr: mysql_slave_status_slave_io_running == 0 or mysql_slave_status_slave_sql_running == 0 for: 2m labels: severity: critical annotations: description: Slave replication (IO or SQL) has been down for more than 2 minutes. 
summary: Slave replication is not running - alert: MySQLReplicationLag expr: (mysql_slave_lag_seconds > 30) and ON(instance) (predict_linear(mysql_slave_lag_seconds[5m], 60 * 2) > 0) for: 1m labels: severity: critical annotations: description: The mysql slave replication has fallen behind and is not recovering summary: MySQL slave replication is lagging - alert: MySQLReplicationLag expr: (mysql_heartbeat_lag_seconds > 30) and ON(instance) (predict_linear(mysql_heartbeat_lag_seconds[5m], 60 * 2) > 0) for: 1m labels: severity: critical annotations: description: The mysql slave replication has fallen behind and is not recovering summary: MySQL slave replication is lagging - alert: MySQLInnoDBLogWaits expr: rate(mysql_global_status_innodb_log_waits[15m]) > 10 labels: severity: warning annotations: description: The innodb logs are waiting for disk at a rate of {{$value}} / second summary: MySQL innodb log writes stalling prometheus-mysqld-exporter-0.11.0+ds/mysqld_exporter.go000066400000000000000000000164761336116511700233550ustar00rootroot00000000000000package main import ( "crypto/tls" "crypto/x509" "fmt" "io/ioutil" "net/http" "os" "path" "github.com/go-sql-driver/mysql" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/log" "github.com/prometheus/common/version" "gopkg.in/alecthomas/kingpin.v2" "gopkg.in/ini.v1" "github.com/prometheus/mysqld_exporter/collector" ) var ( listenAddress = kingpin.Flag( "web.listen-address", "Address to listen on for web interface and telemetry.", ).Default(":9104").String() metricPath = kingpin.Flag( "web.telemetry-path", "Path under which to expose metrics.", ).Default("/metrics").String() configMycnf = kingpin.Flag( "config.my-cnf", "Path to .my.cnf file to read MySQL credentials from.", ).Default(path.Join(os.Getenv("HOME"), ".my.cnf")).String() dsn string ) // scrapers lists all possible collection methods and if they should be enabled by default. var scrapers = map[collector.Scraper]bool{ collector.ScrapeGlobalStatus{}: true, collector.ScrapeGlobalVariables{}: true, collector.ScrapeSlaveStatus{}: true, collector.ScrapeProcesslist{}: false, collector.ScrapeTableSchema{}: true, collector.ScrapeInfoSchemaInnodbTablespaces{}: false, collector.ScrapeInnodbMetrics{}: false, collector.ScrapeAutoIncrementColumns{}: false, collector.ScrapeBinlogSize{}: false, collector.ScrapePerfTableIOWaits{}: false, collector.ScrapePerfIndexIOWaits{}: false, collector.ScrapePerfTableLockWaits{}: false, collector.ScrapePerfEventsStatements{}: false, collector.ScrapePerfEventsWaits{}: false, collector.ScrapePerfFileEvents{}: false, collector.ScrapePerfFileInstances{}: false, collector.ScrapePerfReplicationGroupMemberStats{}: false, collector.ScrapeUserStat{}: false, collector.ScrapeClientStat{}: false, collector.ScrapeTableStat{}: false, collector.ScrapeInnodbCmp{}: false, collector.ScrapeInnodbCmpMem{}: false, collector.ScrapeQueryResponseTime{}: false, collector.ScrapeEngineTokudbStatus{}: false, collector.ScrapeEngineInnodbStatus{}: false, collector.ScrapeHeartbeat{}: false, collector.ScrapeSlaveHosts{}: false, } func parseMycnf(config interface{}) (string, error) { var dsn string opts := ini.LoadOptions{ // MySQL ini file can have boolean keys. 
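// (for example, skip-auto-rehash under [mysql]; exercised by the ignoreBooleanKeys fixture in mysqld_exporter_test.go)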
AllowBooleanKeys: true, } cfg, err := ini.LoadSources(opts, config) if err != nil { return dsn, fmt.Errorf("failed reading ini file: %s", err) } user := cfg.Section("client").Key("user").String() password := cfg.Section("client").Key("password").String() if (user == "") || (password == "") { return dsn, fmt.Errorf("no user or password specified under [client] in %s", config) } host := cfg.Section("client").Key("host").MustString("localhost") port := cfg.Section("client").Key("port").MustUint(3306) socket := cfg.Section("client").Key("socket").String() if socket != "" { dsn = fmt.Sprintf("%s:%s@unix(%s)/", user, password, socket) } else { dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/", user, password, host, port) } sslCA := cfg.Section("client").Key("ssl-ca").String() sslCert := cfg.Section("client").Key("ssl-cert").String() sslKey := cfg.Section("client").Key("ssl-key").String() if sslCA != "" { if tlsErr := customizeTLS(sslCA, sslCert, sslKey); tlsErr != nil { tlsErr = fmt.Errorf("failed to register a custom TLS configuration for mysql dsn: %s", tlsErr) return dsn, tlsErr } dsn = fmt.Sprintf("%s?tls=custom", dsn) } log.Debugln(dsn) return dsn, nil } func customizeTLS(sslCA string, sslCert string, sslKey string) error { var tlsCfg tls.Config caBundle := x509.NewCertPool() pemCA, err := ioutil.ReadFile(sslCA) if err != nil { return err } if ok := caBundle.AppendCertsFromPEM(pemCA); ok { tlsCfg.RootCAs = caBundle } else { return fmt.Errorf("failed to parse pem-encoded CA certificates from %s", sslCA) } if sslCert != "" && sslKey != "" { certPairs := make([]tls.Certificate, 0, 1) keypair, err := tls.LoadX509KeyPair(sslCert, sslKey) if err != nil { return fmt.Errorf("failed to parse pem-encoded SSL cert %s or SSL key %s: %s", sslCert, sslKey, err) } certPairs = append(certPairs, keypair) tlsCfg.Certificates = certPairs } return mysql.RegisterTLSConfig("custom", &tlsCfg) } func init() { prometheus.MustRegister(version.NewCollector("mysqld_exporter")) } func newHandler(metrics collector.Metrics, scrapers []collector.Scraper) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { filteredScrapers := scrapers params := r.URL.Query()["collect[]"] log.Debugln("collect query:", params) // Check if we have some "collect[]" query parameters. if len(params) > 0 { filters := make(map[string]bool) for _, param := range params { filters[param] = true } filteredScrapers = nil for _, scraper := range scrapers { if filters[scraper.Name()] { filteredScrapers = append(filteredScrapers, scraper) } } } registry := prometheus.NewRegistry() registry.MustRegister(collector.New(dsn, metrics, filteredScrapers)) gatherers := prometheus.Gatherers{ prometheus.DefaultGatherer, registry, } // Delegate http serving to Prometheus client library, which will call collector.Collect. h := promhttp.HandlerFor(gatherers, promhttp.HandlerOpts{}) h.ServeHTTP(w, r) } } func main() { // Generate ON/OFF flags for all scrapers. scraperFlags := map[collector.Scraper]*bool{} for scraper, enabledByDefault := range scrapers { defaultOn := "false" if enabledByDefault { defaultOn = "true" } f := kingpin.Flag( "collect."+scraper.Name(), scraper.Help(), ).Default(defaultOn).Bool() scraperFlags[scraper] = f } // Parse flags. log.AddFlags(kingpin.CommandLine) kingpin.Version(version.Print("mysqld_exporter")) kingpin.HelpFlag.Short('h') kingpin.Parse() // landingPage contains the HTML served at '/'. // TODO: Make this nicer and more informative. var landingPage = []byte(`<html>
<head><title>MySQLd exporter</title></head>
<body>
<h1>MySQLd exporter</h1>
<p><a href='` + *metricPath + `'>Metrics</a></p>
</body>
</html>
`) log.Infoln("Starting mysqld_exporter", version.Info()) log.Infoln("Build context", version.BuildContext()) dsn = os.Getenv("DATA_SOURCE_NAME") if len(dsn) == 0 { var err error if dsn, err = parseMycnf(*configMycnf); err != nil { log.Fatal(err) } } // Register only scrapers enabled by flag. log.Infof("Enabled scrapers:") enabledScrapers := []collector.Scraper{} for scraper, enabled := range scraperFlags { if *enabled { log.Infof(" --collect.%s", scraper.Name()) enabledScrapers = append(enabledScrapers, scraper) } } handlerFunc := newHandler(collector.NewMetrics(), enabledScrapers) http.HandleFunc(*metricPath, prometheus.InstrumentHandlerFunc("metrics", handlerFunc)) http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { w.Write(landingPage) }) log.Infoln("Listening on", *listenAddress) log.Fatal(http.ListenAndServe(*listenAddress, nil)) } prometheus-mysqld-exporter-0.11.0+ds/mysqld_exporter_test.go000066400000000000000000000151661336116511700244070ustar00rootroot00000000000000package main import ( "context" "fmt" "io/ioutil" "net" "net/http" "net/url" "os" "os/exec" "reflect" "runtime" "strings" "syscall" "testing" "time" "github.com/smartystreets/goconvey/convey" ) func TestParseMycnf(t *testing.T) { const ( tcpConfig = ` [client] user = root password = abc123 ` tcpConfig2 = ` [client] user = root password = abc123 port = 3308 ` socketConfig = ` [client] user = user password = pass socket = /var/lib/mysql/mysql.sock ` socketConfig2 = ` [client] user = dude password = nopassword # host and port will not be used because of socket presence host = 1.2.3.4 port = 3307 socket = /var/lib/mysql/mysql.sock ` remoteConfig = ` [client] user = dude password = nopassword host = 1.2.3.4 port = 3307 ` ignoreBooleanKeys = ` [client] user = root password = abc123 [mysql] skip-auto-rehash ` badConfig = ` [client] user = root ` badConfig2 = ` [client] password = abc123 socket = /var/lib/mysql/mysql.sock ` badConfig3 = ` [hello] world = ismine ` badConfig4 = `[hello` ) convey.Convey("Various .my.cnf configurations", t, func() { convey.Convey("Local tcp connection", func() { dsn, _ := parseMycnf([]byte(tcpConfig)) convey.So(dsn, convey.ShouldEqual, "root:abc123@tcp(localhost:3306)/") }) convey.Convey("Local tcp connection on non-default port", func() { dsn, _ := parseMycnf([]byte(tcpConfig2)) convey.So(dsn, convey.ShouldEqual, "root:abc123@tcp(localhost:3308)/") }) convey.Convey("Socket connection", func() { dsn, _ := parseMycnf([]byte(socketConfig)) convey.So(dsn, convey.ShouldEqual, "user:pass@unix(/var/lib/mysql/mysql.sock)/") }) convey.Convey("Socket connection ignoring defined host", func() { dsn, _ := parseMycnf([]byte(socketConfig2)) convey.So(dsn, convey.ShouldEqual, "dude:nopassword@unix(/var/lib/mysql/mysql.sock)/") }) convey.Convey("Remote connection", func() { dsn, _ := parseMycnf([]byte(remoteConfig)) convey.So(dsn, convey.ShouldEqual, "dude:nopassword@tcp(1.2.3.4:3307)/") }) convey.Convey("Ignore boolean keys", func() { dsn, _ := parseMycnf([]byte(ignoreBooleanKeys)) convey.So(dsn, convey.ShouldEqual, "root:abc123@tcp(localhost:3306)/") }) convey.Convey("Missed user", func() { _, err := parseMycnf([]byte(badConfig)) convey.So(err, convey.ShouldBeError, fmt.Errorf("no user or password specified under [client] in %s", badConfig)) }) convey.Convey("Missed password", func() { _, err := parseMycnf([]byte(badConfig2)) convey.So(err, convey.ShouldBeError, fmt.Errorf("no user or password specified under [client] in %s", badConfig2)) }) convey.Convey("No [client] section", func() { _, err 
:= parseMycnf([]byte(badConfig3)) convey.So(err, convey.ShouldBeError, fmt.Errorf("no user or password specified under [client] in %s", badConfig3)) }) convey.Convey("Invalid config", func() { _, err := parseMycnf([]byte(badConfig4)) convey.So(err, convey.ShouldBeError, fmt.Errorf("failed reading ini file: unclosed section: %s", badConfig4)) }) }) } // bin stores information about path of executable and attached port type bin struct { path string port int } // TestBin builds, runs and tests binary. func TestBin(t *testing.T) { var err error binName := "mysqld_exporter" binDir, err := ioutil.TempDir("/tmp", binName+"-test-bindir-") if err != nil { t.Fatal(err) } defer func() { err := os.RemoveAll(binDir) if err != nil { t.Fatal(err) } }() importpath := "github.com/prometheus/mysqld_exporter/vendor/github.com/prometheus/common" path := binDir + "/" + binName xVariables := map[string]string{ importpath + "/version.Version": "gotest-version", importpath + "/version.Branch": "gotest-branch", importpath + "/version.Revision": "gotest-revision", } var ldflags []string for x, value := range xVariables { ldflags = append(ldflags, fmt.Sprintf("-X %s=%s", x, value)) } cmd := exec.Command( "go", "build", "-o", path, "-ldflags", strings.Join(ldflags, " "), ) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr err = cmd.Run() if err != nil { t.Fatalf("Failed to build: %s", err) } tests := []func(*testing.T, bin){ testLandingPage, } portStart := 56000 t.Run(binName, func(t *testing.T) { for _, f := range tests { f := f // capture range variable fName := runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name() portStart++ data := bin{ path: path, port: portStart, } t.Run(fName, func(t *testing.T) { t.Parallel() f(t, data) }) } }) } func testLandingPage(t *testing.T, data bin) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() // Run exporter. cmd := exec.CommandContext( ctx, data.path, "--web.listen-address", fmt.Sprintf(":%d", data.port), ) cmd.Env = append(os.Environ(), "DATA_SOURCE_NAME=127.0.0.1:3306") if err := cmd.Start(); err != nil { t.Fatal(err) } defer cmd.Wait() defer cmd.Process.Kill() // Get the main page. urlToGet := fmt.Sprintf("http://127.0.0.1:%d", data.port) body, err := waitForBody(urlToGet) if err != nil { t.Fatal(err) } got := string(body) expected := ` MySQLd exporter
func testLandingPage(t *testing.T, data bin) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Run exporter.
	cmd := exec.CommandContext(
		ctx,
		data.path,
		"--web.listen-address", fmt.Sprintf(":%d", data.port),
	)
	cmd.Env = append(os.Environ(), "DATA_SOURCE_NAME=127.0.0.1:3306")
	if err := cmd.Start(); err != nil {
		t.Fatal(err)
	}
	defer cmd.Wait()
	defer cmd.Process.Kill()

	// Get the main page.
	urlToGet := fmt.Sprintf("http://127.0.0.1:%d", data.port)
	body, err := waitForBody(urlToGet)
	if err != nil {
		t.Fatal(err)
	}
	got := string(body)

	expected := `<html>
<head><title>MySQLd exporter</title></head>
<body>
<h1>MySQLd exporter</h1>
<p><a href='/metrics'>Metrics</a></p>
</body>
</html>
`
	if got != expected {
		t.Fatalf("got '%s' but expected '%s'", got, expected)
	}
}

// waitForBody is a helper function which makes http calls until http server is up
// and then returns body of the successful call.
func waitForBody(urlToGet string) (body []byte, err error) {
	tries := 60

	// Get data, but we need to wait a bit for http server.
	for i := 0; i <= tries; i++ {
		// Try to get web page.
		body, err = getBody(urlToGet)
		if err == nil {
			return body, err
		}

		// If there is a syscall.ECONNREFUSED error (web server not available) then retry.
		if urlError, ok := err.(*url.Error); ok {
			if opError, ok := urlError.Err.(*net.OpError); ok {
				if osSyscallError, ok := opError.Err.(*os.SyscallError); ok {
					if osSyscallError.Err == syscall.ECONNREFUSED {
						time.Sleep(1 * time.Second)
						continue
					}
				}
			}
		}

		// There was an error, and it wasn't syscall.ECONNREFUSED.
		return nil, err
	}
	return nil, fmt.Errorf("failed to GET %s after %d tries: %s", urlToGet, tries, err)
}

// getBody is a helper function which retrieves http body from given address.
func getBody(urlToGet string) ([]byte, error) {
	resp, err := http.Get(urlToGet)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return body, nil
}
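// A rough sketch of how the helpers above compose (names as defined in this
// file): waitForBody("http://127.0.0.1:56001") calls getBody once per second,
// retrying only while the GET fails with syscall.ECONNREFUSED, i.e. while the
// exporter subprocess is still binding its listener.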
prometheus-mysqld-exporter-0.11.0+ds/test_image.sh000077500000000000000000000012521336116511700222270ustar00rootroot00000000000000#!/bin/bash
set -exo pipefail

docker_image=$1
port=$2

container_id=''

wait_start() {
  for i in {1..10}; do
    if /usr/bin/curl -s -m 5 -f "http://localhost:${port}/metrics" > /dev/null; then
      docker_cleanup
      exit 0
    else
      sleep 1
    fi
  done

  exit 1
}

docker_start() {
  container_id=$(docker run -d --network mysql-test -e DATA_SOURCE_NAME="root:secret@(mysql-test:3306)/" -p "${port}":"${port}" "${docker_image}")
}

docker_cleanup() {
  docker kill "${container_id}"
}

if [[ "$#" -ne 2 ]] ; then
    echo "Usage: $0 quay.io/prometheus/mysqld-exporter:v0.10.0 9104" >&2
    exit 1
fi

docker_start
wait_start
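# Example invocation (tag is illustrative; assumes a MySQL container reachable
# as mysql-test on the mysql-test Docker network, as the DSN above implies):
#   ./test_image.sh prom/mysqld-exporter:master 9104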
"versionExact": "v1.1.0" }, { "checksumSHA1": "yIkYzW7bzAD81zHyuCNmEj4+oxQ=", "path": "github.com/gopherjs/gopherjs/js", "revision": "dc374d32704510cb387457180ca9d5193978b555", "revisionTime": "2017-06-09T00:26:10Z" }, { "checksumSHA1": "Js/yx9fZ3+wH1wZpHNIxSTMIaCg=", "path": "github.com/jtolds/gls", "revision": "77f18212c9c7edc9bd6a33d383a7b545ce62f064", "revisionTime": "2017-05-03T22:40:06Z", "version": "v4.2.1", "versionExact": "v4.2.1" }, { "checksumSHA1": "bKMZjd2wPw13VwoE7mBeSv5djFA=", "path": "github.com/matttproud/golang_protobuf_extensions/pbutil", "revision": "c12348ce28de40eed0136aa2b644d0ee0650e56c", "revisionTime": "2016-04-24T11:30:07Z", "version": "v1.0.1", "versionExact": "v1.0.1" }, { "checksumSHA1": "WVgL9pNO2RZCCcaXfSYSNEPgtCo=", "path": "github.com/prometheus/client_golang/prometheus", "revision": "77e8f2ddcfed59ece3a8151879efb2304b5cbbcf", "revisionTime": "2018-06-23T15:59:54Z" }, { "checksumSHA1": "MYqKV5uVTfCxP9zBug7naBQ1vr8=", "path": "github.com/prometheus/client_golang/prometheus/promhttp", "revision": "77e8f2ddcfed59ece3a8151879efb2304b5cbbcf", "revisionTime": "2018-06-23T15:59:54Z" }, { "checksumSHA1": "DvwvOlPNAgRntBzt3b3OSRMS2N4=", "path": "github.com/prometheus/client_model/go", "revision": "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c", "revisionTime": "2017-11-17T10:05:41Z" }, { "checksumSHA1": "vPdC/DzEm7YbzRir2wwnpLPfay8=", "path": "github.com/prometheus/common/expfmt", "revision": "7600349dcfe1abd18d72d3a1770870d9800a7801", "revisionTime": "2018-05-18T15:47:59Z" }, { "checksumSHA1": "GWlM3d2vPYyNATtTFgftS10/A9w=", "path": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", "revision": "7600349dcfe1abd18d72d3a1770870d9800a7801", "revisionTime": "2018-05-18T15:47:59Z" }, { "checksumSHA1": "MGnqHnmEqc1fjnYiWReSiW8C27A=", "path": "github.com/prometheus/common/log", "revision": "7600349dcfe1abd18d72d3a1770870d9800a7801", "revisionTime": "2018-05-18T15:47:59Z" }, { "checksumSHA1": "EXTRY7DL9gFW8c341Dk6LDXCBn8=", "path": "github.com/prometheus/common/model", "revision": "7600349dcfe1abd18d72d3a1770870d9800a7801", "revisionTime": "2018-05-18T15:47:59Z" }, { "checksumSHA1": "91KYK0SpvkaMJJA2+BcxbVnyRO0=", "path": "github.com/prometheus/common/version", "revision": "7600349dcfe1abd18d72d3a1770870d9800a7801", "revisionTime": "2018-05-18T15:47:59Z" }, { "checksumSHA1": "s8OGVwKHbYx/oNKNyZ8f7wWK0dA=", "path": "github.com/prometheus/procfs", "revision": "7d6f385de8bea29190f15ba9931442a0eaef9af7", "revisionTime": "2018-06-12T22:21:13Z" }, { "checksumSHA1": "lv9rIcjbVEGo8AT1UCUZXhXrfQc=", "path": "github.com/prometheus/procfs/internal/util", "revision": "7d6f385de8bea29190f15ba9931442a0eaef9af7", "revisionTime": "2018-06-12T22:21:13Z" }, { "checksumSHA1": "HSP5hVT0CNMRa8+Xtz4z2Ic5U0E=", "path": "github.com/prometheus/procfs/nfs", "revision": "7d6f385de8bea29190f15ba9931442a0eaef9af7", "revisionTime": "2018-06-12T22:21:13Z" }, { "checksumSHA1": "yItvTQLUVqm/ArLEbvEhqG0T5a0=", "path": "github.com/prometheus/procfs/xfs", "revision": "7d6f385de8bea29190f15ba9931442a0eaef9af7", "revisionTime": "2018-06-12T22:21:13Z" }, { "checksumSHA1": "+nVM+CEZGAopOrYlLifgWP+X01E=", "path": "github.com/satori/go.uuid", "revision": "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3", "revisionTime": "2018-01-03T03:42:45Z", "version": "v1.2.0", "versionExact": "v1.2.0" }, { "checksumSHA1": "GWtDi0sYbtCQzF/ZaVhaHvCMvuk=", "path": "github.com/sirupsen/logrus", "revision": "c155da19408a8799da419ed3eeb0cb5db0ad5dbc", "revisionTime": "2018-03-11T22:51:37Z", "version": "v1.0.5", "versionExact": 
"v1.0.5" }, { "checksumSHA1": "wVmkBavCZSwHYTDGxa1xOD3RKe0=", "path": "github.com/smartystreets/assertions", "revision": "7678a5452ebea5b7090a6b163f844c133f523da2", "revisionTime": "2018-03-01T16:12:46Z", "version": "1.8.3", "versionExact": "1.8.3" }, { "checksumSHA1": "v6W3GIQMzr3QSXB2NtBa9X7SwiI=", "path": "github.com/smartystreets/assertions/internal/go-render/render", "revision": "7678a5452ebea5b7090a6b163f844c133f523da2", "revisionTime": "2018-03-01T16:12:46Z", "version": "1.8.3", "versionExact": "1.8.3" }, { "checksumSHA1": "r6FauVdOTFnwYQgrKGFuWUbIAJE=", "path": "github.com/smartystreets/assertions/internal/oglematchers", "revision": "7678a5452ebea5b7090a6b163f844c133f523da2", "revisionTime": "2018-03-01T16:12:46Z", "version": "1.8.3", "versionExact": "1.8.3" }, { "checksumSHA1": "f4m09DHEetaanti/GqUJzyCBTaI=", "path": "github.com/smartystreets/goconvey/convey", "revision": "9e8dc3f972df6c8fcc0375ef492c24d0bb204857", "revisionTime": "2017-06-02T16:46:21Z", "version": "1.6.3", "versionExact": "1.6.3" }, { "checksumSHA1": "9LakndErFi5uCXtY1KWl0iRnT4c=", "path": "github.com/smartystreets/goconvey/convey/gotest", "revision": "9e8dc3f972df6c8fcc0375ef492c24d0bb204857", "revisionTime": "2017-06-02T16:46:21Z", "version": "1.6.3", "versionExact": "1.6.3" }, { "checksumSHA1": "FWDhk37bhAwZ2363D/L2xePwR64=", "path": "github.com/smartystreets/goconvey/convey/reporting", "revision": "9e8dc3f972df6c8fcc0375ef492c24d0bb204857", "revisionTime": "2017-06-02T16:46:21Z", "version": "1.6.3", "versionExact": "1.6.3" }, { "checksumSHA1": "BGm8lKZmvJbf/YOJLeL1rw2WVjA=", "path": "golang.org/x/crypto/ssh/terminal", "revision": "a49355c7e3f8fe157a85be2f77e6e269a0f89602", "revisionTime": "2018-06-20T09:14:27Z" }, { "checksumSHA1": "e66DmNWQKgI97tvj4BH7rHYnyJs=", "path": "golang.org/x/sys/unix", "revision": "7138fd3d9dc8335c567ca206f4333fb75eb05d56", "revisionTime": "2018-06-27T13:57:12Z" }, { "checksumSHA1": "zc2NI38L40/N4+pjd9P2ESz68/0=", "path": "golang.org/x/sys/windows", "revision": "7138fd3d9dc8335c567ca206f4333fb75eb05d56", "revisionTime": "2018-06-27T13:57:12Z" }, { "checksumSHA1": "P9OIhD26uWlIST/me4TYnvseCoY=", "path": "golang.org/x/sys/windows/registry", "revision": "7138fd3d9dc8335c567ca206f4333fb75eb05d56", "revisionTime": "2018-06-27T13:57:12Z" }, { "checksumSHA1": "uVlUSSKplihZG7N+QJ6fzDZ4Kh8=", "path": "golang.org/x/sys/windows/svc/eventlog", "revision": "7138fd3d9dc8335c567ca206f4333fb75eb05d56", "revisionTime": "2018-06-27T13:57:12Z" }, { "checksumSHA1": "LiyXfqOzaeQ8vgYZH3t2hUEdVTw=", "path": "google.golang.org/appengine/cloudsql", "revision": "b1f26356af11148e710935ed1ac8a7f5702c7612", "revisionTime": "2018-05-21T22:34:13Z" }, { "checksumSHA1": "WfiM+grLatLDuXKj1roCcRDod/4=", "path": "gopkg.in/DATA-DOG/go-sqlmock.v1", "revision": "d76b18b42f285b792bf985118980ce9eacea9d10", "revisionTime": "2017-09-01T07:34:10Z" }, { "checksumSHA1": "sToCp8GThnMnsBzsHv+L/tBYQrQ=", "path": "gopkg.in/alecthomas/kingpin.v2", "revision": "947dcec5ba9c011838740e680966fd7087a71d0d", "revisionTime": "2017-12-17T18:08:21Z" }, { "checksumSHA1": "nv0VcXrE4cc3r10jK27JZYbr17E=", "path": "gopkg.in/ini.v1", "revision": "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5", "revisionTime": "2018-05-26T22:45:42Z" } ], "rootPath": "github.com/prometheus/mysqld_exporter" }