pax_global_header00006660000000000000000000000064127752221070014517gustar00rootroot0000000000000052 comment=69cb5a23dc8a1697ad5e4d009deba67784ff03f8 prometheus-mysqld-exporter-0.9.0+ds/000077500000000000000000000000001277522210700174775ustar00rootroot00000000000000prometheus-mysqld-exporter-0.9.0+ds/.gitignore000066400000000000000000000001231277522210700214630ustar00rootroot00000000000000/.build /mysqld_exporter /.release /.tarballs *.tar.gz *.test *-stamp .idea *.iml prometheus-mysqld-exporter-0.9.0+ds/.promu.yml000066400000000000000000000011711277522210700214420ustar00rootroot00000000000000repository: path: github.com/prometheus/mysqld_exporter build: flags: -a -tags netgo ldflags: | -X {{repoPath}}/vendor/github.com/prometheus/common/version.Version={{.Version}} -X {{repoPath}}/vendor/github.com/prometheus/common/version.Revision={{.Revision}} -X {{repoPath}}/vendor/github.com/prometheus/common/version.Branch={{.Branch}} -X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildUser={{user}}@{{host}} -X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} tarball: files: - LICENSE - NOTICE prometheus-mysqld-exporter-0.9.0+ds/.travis.yml000066400000000000000000000000761277522210700216130ustar00rootroot00000000000000sudo: false language: go go: - 1.5.4 - 1.6.2 script: - make prometheus-mysqld-exporter-0.9.0+ds/AUTHORS.md000066400000000000000000000005431277522210700211500ustar00rootroot00000000000000The Prometheus project was started by Matt T. Proud (emeritus) and Julius Volz in 2012. Maintainers of this repository: * Julius Volz * Brian Brazil The following individuals have contributed code to this repository (listed in alphabetical order): * Eugene Chertikhin prometheus-mysqld-exporter-0.9.0+ds/CHANGELOG.md000066400000000000000000000122011277522210700213040ustar00rootroot00000000000000## 0.9.0 / 2016-09-26 BREAKING CHANGES: * InnoDB buffer pool page stats have been renamed/fixed to better support aggregations (#130) * [FEATURE] scrape slave status for multisource replication #134 * [FEATURE] Add client statistics support (+ add tests on users & clients statistics) #138 * [IMPROVEMENT] Consistency of error logging. #144 * [IMPROVEMENT] Add label aggregation for innodb buffer metrics #130 * [IMPROVEMENT] Improved and fixed user/client statistics #149 * [FEATURE] Added the last binlog file number metric. #152 * [MISC] Add an example recording rules file #156 * [FEATURE] Added PXC/Galera info metrics. #155 * [FEATURE] Added metrics from SHOW ENGINE INNODB STATUS. #160 * [IMPROVEMENT] Fix wsrep_cluster_status #146 ## 0.8.1 / 2016-05-05 * [BUGFIX] Fix collect.info_schema.innodb_tablespaces #119 * [BUGFIX] Fix SLAVE STATUS "Connecting" #125 * [MISC] New release process using docker, circleci and a centralized building tool #120 * [MISC] Typos #121 ## 0.8.0 / 2016-04-19 BREAKING CHANGES: * global status `innodb_buffer_pool_pages` have been renamed/labeled. * innodb metrics `buffer_page_io` have been renamed/labeled. * [MISC] Add Travis CI automatic testing. * [MISC] Refactor mysqld_exporter.go into collector package. 
* [FEATURE] Add `mysql_up` metric (PR #99) * [FEATURE] Collect time metrics for processlist (PR #87) * [CHANGE] Separate innodb_buffer_pool_pages status metrics (PR #101) * [FEATURE] Added metrics from SHOW ENGINE TOKUDB STATUS (PR #103) * [CHANGE] Add special handling of "buffer_page_io" subsystem (PR #115) * [FEATURE] Add collector for innodb_sys_tablespaces (PR #116) ## 0.7.1 / 2016-02-16 * [IMPROVEMENT] Soft error on collector failure (PR #84) * [BUGFIX] Fix innodb_metrics collector (PR #85) * [BUGFIX] Parse auto increment values and maximum as float64 (PR #88) ## 0.7.0 / 2016-02-12 BREAKING CHANGES: * Global status metrics for "handlers" have been renamed * [FEATURE] New collector for `information_schema.table_statistics` (PR #57) * [FEATURE] New server version metric (PR #59) * [FEATURE] New collector for `information_schema.innodb_metrics` (PR #69) * [FEATURE] Read credentials from ".my.cnf" files (PR #77) * [FEATURE] New collector for query response time distribution (PR #79) * [FEATURE] Add minimum time flag for processlist metrics (PR #82) * [IMPROVEMENT] Collect more metrics from `performance_schema.events_statements_summary_by_digest` (PR #58) * [IMPROVEMENT] Add option to filter metrics queries from the slow log (PR #60) * [IMPROVEMENT] Leverage lock-free SHOW SLAVE STATUS (PR #61) * [IMPROVEMENT] Add labels to global status "handlers" counters (PR #68) * [IMPROVEMENT] Update Makefile.COMMON from utils repo (PR #73) * [BUGFIX] Fix broken error return in the scrape function and log an error (PR #64) * [BUGFIX] Check log_bin before running SHOW BINARY LOGS (PR #74) * [BUGFIX] Fixed uint for scrapeInnodbMetrics() and gofmt (PR #81) ## 0.6.0 / 2015-10-28 BREAKING CHANGES: * The digest_text mapping metric has been removed, now included in all digest metrics (PR #50) * Flags for timing metrics have been removed, now included with related counter flag (PR #48) * [FEATURE] New collector for metrics from information_schema.processlist (PR #34) * [FEATURE] New collector for binlog counts/sizes (PR #35) * [FEATURE] New collector for performance_schema.{file_summary_by_event_name,events_waits_summary_global_by_event_name} (PR #49) * [FEATURE] New collector for information_schema.tables (PR #51) * [IMPROVEMENT] All collection methods now have enable flags (PR #46) * [IMPROVEMENT] Consolidate performance_schema metrics flags (PR #48) * [IMPROVEMENT] Removed need for digest_text mapping metric (PR #50) * [IMPROVEMENT] Update docs (PR #52) ## 0.5.0 / 2015-09-22 * [FEATURE] Add metrics for table locks * [BUGFIX] Use uint64 to prevent int64 overflow * [BUGFIX] Correct picsecond times to correct second values ## 0.4.0 / 2015-09-21 * [CHANGE] Limit events_statements to recently used * [FEATURE] Add digest_text mapping metric * [IMPROVEMENT] General refactoring ## 0.3.0 / 2015-08-31 BREAKING CHANGES: Most metrics have been prefixed with Prometheus subsystem names to avoid conflicts between different collection methods. * [BUGFIX] Separate slave_status and global_status into separate subsystems. * [IMPROVEMENT] Refactor metrics creation. * [IMPROVEMENT] Add support for performance_schema.table_io_waits_summary_by_table collection. * [IMPROVEMENT] Add support for performance_schema.table_io_waits_summary_by_index_usage collection. * [IMPROVEMENT] Add support for performance_schema.events_statements_summary_by_digest collection. * [IMPROVEMENT] Add support for Percona userstats output collection. * [IMPROVEMENT] Add support for auto_increment column metrics collection. 
* [IMPROVEMENT] Add support for `SHOW GLOBAL VARIABLES` metrics collection. ## 0.2.0 / 2015-06-24 BREAKING CHANGES: Logging-related flags have changed. Metric names have changed. * [IMPROVEMENT] Add Docker support. * [CHANGE] Switch logging to Prometheus' logging library. * [BUGFIX] Fix slave status parsing. * [BUGFIX] Fix truncated numbers. * [CHANGE] Reorganize metrics names and types. ## 0.1.0 / 2015-05-05 * Initial release prometheus-mysqld-exporter-0.9.0+ds/CONTRIBUTING.md000066400000000000000000000015331277522210700217320ustar00rootroot00000000000000# Contributing Prometheus uses GitHub to manage reviews of pull requests. * If you have a trivial fix or improvement, go ahead and create a pull request, addressing (with `@...`) one or more of the maintainers (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. * If you plan to do something more involved, first discuss your ideas on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). This will avoid unnecessary work and surely give you and us a good deal of inspiration. * Relevant coding style guidelines are the [Go Code Review Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) and the _Formatting and style_ section of Peter Bourgon's [Go: Best Practices for Production Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). prometheus-mysqld-exporter-0.9.0+ds/Dockerfile000066400000000000000000000003361277522210700214730ustar00rootroot00000000000000FROM quay.io/prometheus/busybox:latest MAINTAINER The Prometheus Authors COPY mysqld_exporter /bin/mysqld_exporter EXPOSE 9104 ENTRYPOINT [ "/bin/mysqld_exporter" ] prometheus-mysqld-exporter-0.9.0+ds/LICENSE000066400000000000000000000260751277522210700205160ustar00rootroot00000000000000Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. prometheus-mysqld-exporter-0.9.0+ds/Makefile000066400000000000000000000032771277522210700211500ustar00rootroot00000000000000# Copyright 2015 The Prometheus Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. GO := GO15VENDOREXPERIMENT=1 go PROMU := $(GOPATH)/bin/promu pkgs = $(shell $(GO) list ./... | grep -v /vendor/) PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_NAME ?= mysqld-exporter DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) all: format build test style: @echo ">> checking code style" @! gofmt -d $(shell find . 
-path ./vendor -prune -o -name '*.go' -print) | grep '^' test: @echo ">> running tests" @$(GO) test -short -race $(pkgs) format: @echo ">> formatting code" @$(GO) fmt $(pkgs) vet: @echo ">> vetting code" @$(GO) vet $(pkgs) build: promu @echo ">> building binaries" @$(PROMU) build --prefix $(PREFIX) tarball: promu @echo ">> building release tarball" @$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) docker: @echo ">> building docker image" @docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . promu: @GOOS=$(shell uname -s | tr A-Z a-z) \ GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \ $(GO) get -u github.com/prometheus/promu .PHONY: all style format build test vet tarball docker promu prometheus-mysqld-exporter-0.9.0+ds/NOTICE000066400000000000000000000001011277522210700203730ustar00rootroot00000000000000Exporter for MySQL daemon. Copyright 2015 The Prometheus Authors prometheus-mysqld-exporter-0.9.0+ds/README.md000066400000000000000000000152721277522210700207650ustar00rootroot00000000000000# MySQL Server Exporter [![Build Status](https://travis-ci.org/prometheus/mysqld_exporter.svg)][travis] [![CircleCI](https://circleci.com/gh/prometheus/mysqld_exporter/tree/master.svg?style=shield)][circleci] [![Docker Repository on Quay](https://quay.io/repository/prometheus/mysqld-exporter/status)][quay] [![Docker Pulls](https://img.shields.io/docker/pulls/prom/mysqld-exporter.svg?maxAge=604800)][hub] Prometheus exporter for MySQL server metrics. Supported MySQL versions: 5.1 and up. NOTE: Not all collection methods are support on MySQL < 5.6 ## Building and running ### Required Grants CREATE USER 'exporter'@'localhost' IDENTIFIED BY 'XXXXXXXX'; GRANT PROCESS, REPLICATION CLIENT, SELECT ON *.* TO 'exporter'@'localhost' WITH MAX_USER_CONNECTIONS 3; NOTE: It is recommended to set a max connection limit for the user to avoid overloading the server with monitoring scrapes under heavy load. ### Build make ### Running Running using an environment variable: export DATA_SOURCE_NAME='login:password@(hostname:port)/' ./mysqld_exporter Running using ~/.my.cnf: ./mysqld_exporter ### Collector Flags Name | MySQL Version | Description -------------------------------------------------------|---------------|------------------------------------------------------------------------------------ collect.auto_increment.columns | 5.1 | Collect auto_increment columns and max values from information_schema. collect.binlog_size | 5.1 | Collect the current size of all registered binlog files collect.engine_innodb_status | 5.1 | Collect from SHOW ENGINE INNODB STATUS. collect.engine_tokudb_status | 5.6 | Collect from SHOW ENGINE TOKUDB STATUS. collect.global_status | 5.1 | Collect from SHOW GLOBAL STATUS (Enabled by default) collect.global_variables | 5.1 | Collect from SHOW GLOBAL VARIABLES (Enabled by default) collect.info_schema.clientstats | 5.5 | If running with userstat=1, set to true to collect client statistics. collect.info_schema.innodb_metrics | 5.6 | Collect metrics from information_schema.innodb_metrics. collect.info_schema.innodb_tablespaces | 5.7 | Collect metrics from information_schema.innodb_sys_tablespaces. collect.info_schema.processlist | 5.1 | Collect thread state counts from information_schema.processlist. collect.info_schema.processlist.min_time | 5.1 | Minimum time a thread must be in each state to be counted. (default: 0) collect.info_schema.query_response_time | 5.5 | Collect query response time distribution if query_response_time_stats is ON. 
collect.info_schema.tables | 5.1 | Collect metrics from information_schema.tables (Enabled by default) collect.info_schema.tables.databases | 5.1 | The list of databases to collect table stats for, or '`*`' for all. collect.info_schema.tablestats | 5.1 | If running with userstat=1, set to true to collect table statistics. collect.info_schema.userstats | 5.1 | If running with userstat=1, set to true to collect user statistics. collect.perf_schema.eventsstatements | 5.6 | Collect metrics from performance_schema.events_statements_summary_by_digest. collect.perf_schema.eventsstatements.digest_text_limit | 5.6 | Maximum length of the normalized statement text. (default: 120) collect.perf_schema.eventsstatements.limit | 5.6 | Limit the number of events statements digests by response time. (default: 250) collect.perf_schema.eventsstatements.timelimit | 5.6 | Limit how old the 'last_seen' events statements can be, in seconds. (default: 86400) collect.perf_schema.eventswaits | 5.5 | Collect metrics from performance_schema.events_waits_summary_global_by_event_name. collect.perf_schema.file_events | 5.6 | Collect metrics from performance_schema.file_summary_by_event_name. collect.perf_schema.indexiowaits | 5.6 | Collect metrics from performance_schema.table_io_waits_summary_by_index_usage. collect.perf_schema.tableiowaits | 5.6 | Collect metrics from performance_schema.table_io_waits_summary_by_table. collect.perf_schema.tablelocks | 5.6 | Collect metrics from performance_schema.table_lock_waits_summary_by_table. collect.slave_status | 5.1 | Collect from SHOW SLAVE STATUS (Enabled by default) ### General Flags Name | Description -------------------------------------------|-------------------------------------------------------------------------------------------------- config.my-cnf | Path to .my.cnf file to read MySQL credentials from. (default: `~/.my.cnf`) log.level | Logging verbosity (default: info) log_slow_filter | Add a log_slow_filter to avoid exessive MySQL slow logging. NOTE: Not supported by Oracle MySQL. web.listen-address | Address to listen on for web interface and telemetry. web.telemetry-path | Path under which to expose metrics. version | Print the version information. ### Setting the MySQL server's data source name The MySQL server's [data source name](http://en.wikipedia.org/wiki/Data_source_name) must be set via the `DATA_SOURCE_NAME` environment variable. The format of this variable is described at https://github.com/go-sql-driver/mysql#dsn-data-source-name. ## Using Docker You can deploy this exporter using the [prom/mysqld-exporter](https://registry.hub.docker.com/u/prom/mysqld-exporter/) Docker image. 
For example: ```bash docker pull prom/mysqld-exporter docker run -d -p 9104:9104 --link=my_mysql_container:bdd \ -e DATA_SOURCE_NAME="user:password@(bdd:3306)/database" prom/mysqld-exporter ``` ## Example Rules There are some sample rules available in [example.rules](example.rules) [circleci]: https://circleci.com/gh/prometheus/mysqld_exporter [hub]: https://hub.docker.com/r/prom/mysqld-exporter/ [travis]: https://travis-ci.org/prometheus/mysqld_exporter [quay]: https://quay.io/repository/prometheus/mysqld-exporter prometheus-mysqld-exporter-0.9.0+ds/VERSION000066400000000000000000000000061277522210700205430ustar00rootroot000000000000000.9.0 prometheus-mysqld-exporter-0.9.0+ds/circle.yml000066400000000000000000000044721277522210700214720ustar00rootroot00000000000000machine: environment: DOCKER_IMAGE_NAME: prom/mysqld-exporter QUAY_IMAGE_NAME: quay.io/prometheus/mysqld-exporter DOCKER_TEST_IMAGE_NAME: quay.io/prometheus/golang-builder:1.6-base REPO_PATH: github.com/prometheus/mysqld_exporter pre: - sudo curl -L -o /usr/bin/docker 'https://s3-external-1.amazonaws.com/circle-downloads/docker-1.9.1-circleci' - sudo chmod 0755 /usr/bin/docker - sudo curl -L 'https://github.com/aktau/github-release/releases/download/v0.6.2/linux-amd64-github-release.tar.bz2' | tar xvjf - --strip-components 3 -C $HOME/bin services: - docker dependencies: pre: - make promu - docker info override: - promu crossbuild - ln -s .build/linux-amd64/mysqld_exporter mysqld_exporter - | if [ -n "$CIRCLE_TAG" ]; then make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG else make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME fi post: - mkdir $CIRCLE_ARTIFACTS/binaries/ && cp -a .build/* $CIRCLE_ARTIFACTS/binaries/ - docker images test: override: - docker run --rm -t -v "$(pwd):/app" "${DOCKER_TEST_IMAGE_NAME}" -i "${REPO_PATH}" -T deployment: hub_branch: branch: master owner: prometheus commands: - docker login -e $DOCKER_EMAIL -u $DOCKER_LOGIN -p $DOCKER_PASSWORD - docker login -e $QUAY_EMAIL -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io - docker push $DOCKER_IMAGE_NAME - docker push $QUAY_IMAGE_NAME hub_tag: tag: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/ owner: prometheus commands: - promu crossbuild tarballs - promu release .tarballs - mkdir $CIRCLE_ARTIFACTS/releases/ && cp -a .tarballs/* $CIRCLE_ARTIFACTS/releases/ - docker login -e $DOCKER_EMAIL -u $DOCKER_LOGIN -p $DOCKER_PASSWORD - docker login -e $QUAY_EMAIL -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io - | if [[ "$CIRCLE_TAG" =~ ^v[0-9]+(\.[0-9]+){2}$ ]]; then docker tag "$DOCKER_IMAGE_NAME:$CIRCLE_TAG" "$DOCKER_IMAGE_NAME:latest" docker tag "$QUAY_IMAGE_NAME:$CIRCLE_TAG" "$QUAY_IMAGE_NAME:latest" fi - docker push $DOCKER_IMAGE_NAME - docker push $QUAY_IMAGE_NAME prometheus-mysqld-exporter-0.9.0+ds/collector/000077500000000000000000000000001277522210700214655ustar00rootroot00000000000000prometheus-mysqld-exporter-0.9.0+ds/collector/binlog.go000066400000000000000000000036071277522210700232740ustar00rootroot00000000000000// Scrape `SHOW BINARY LOGS` package collector import ( "database/sql" "strconv" "strings" "github.com/prometheus/client_golang/prometheus" ) const ( // Subsystem. binlog = "binlog" // Queries. logbinQuery = `SELECT @@log_bin` binlogQuery = `SHOW BINARY LOGS` ) // Metric descriptors. 
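// These package-level descriptors are created once and shared across scrapes;
// ScrapeBinlogSize emits values against them with prometheus.MustNewConstMetric,
// so the collector itself holds no mutable state between scrapes.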
var (
	binlogSizeDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, binlog, "size_bytes"),
		"Combined size of all registered binlog files.",
		[]string{}, nil,
	)
	binlogFilesDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, binlog, "files"),
		"Number of registered binlog files.",
		[]string{}, nil,
	)
	binlogFileNumberDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, binlog, "file_number"),
		"The last binlog file number.",
		[]string{}, nil,
	)
)

// ScrapeBinlogSize collects from `SHOW BINARY LOGS`.
func ScrapeBinlogSize(db *sql.DB, ch chan<- prometheus.Metric) error {
	var logBin uint8
	err := db.QueryRow(logbinQuery).Scan(&logBin)
	if err != nil {
		return err
	}
	// If log_bin is OFF, do not run SHOW BINARY LOGS, which would produce a MySQL error.
	if logBin == 0 {
		return nil
	}
	masterLogRows, err := db.Query(binlogQuery)
	if err != nil {
		return err
	}
	defer masterLogRows.Close()
	var (
		size     uint64
		count    uint64
		filename string
		filesize uint64
	)
	size = 0
	count = 0
	for masterLogRows.Next() {
		if err := masterLogRows.Scan(&filename, &filesize); err != nil {
			return err
		}
		size += filesize
		count++
	}
	ch <- prometheus.MustNewConstMetric(
		binlogSizeDesc, prometheus.GaugeValue, float64(size),
	)
	ch <- prometheus.MustNewConstMetric(
		binlogFilesDesc, prometheus.GaugeValue, float64(count),
	)
	// The last row contains the last binlog file number.
	value, _ := strconv.ParseFloat(strings.Split(filename, ".")[1], 64)
	ch <- prometheus.MustNewConstMetric(
		binlogFileNumberDesc, prometheus.GaugeValue, value,
	)
	return nil
}
prometheus-mysqld-exporter-0.9.0+ds/collector/binlog_test.go000066400000000000000000000027451277522210700243350ustar00rootroot00000000000000
package collector

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"github.com/smartystreets/goconvey/convey"
	"gopkg.in/DATA-DOG/go-sqlmock.v1"
)

func TestScrapeBinlogSize(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("error opening a stub database connection: %s", err)
	}
	defer db.Close()

	mock.ExpectQuery(logbinQuery).WillReturnRows(sqlmock.NewRows([]string{""}).AddRow(1))
	columns := []string{"Log_name", "File_size"}
	rows := sqlmock.NewRows(columns).
		AddRow("centos6-bin.000001", "1813").
		AddRow("centos6-bin.000002", "120").
		AddRow("centos6-bin.000444", "573009")
	mock.ExpectQuery(sanitizeQuery(binlogQuery)).WillReturnRows(rows)

	ch := make(chan prometheus.Metric)
	go func() {
		if err = ScrapeBinlogSize(db, ch); err != nil {
			t.Errorf("error calling function on test: %s", err)
		}
		close(ch)
	}()

	counterExpected := []MetricResult{
		{labels: labelMap{}, value: 574942, metricType: dto.MetricType_GAUGE},
		{labels: labelMap{}, value: 3, metricType: dto.MetricType_GAUGE},
		{labels: labelMap{}, value: 444, metricType: dto.MetricType_GAUGE},
	}
	convey.Convey("Metrics comparison", t, func() {
		for _, expect := range counterExpected {
			got := readMetric(<-ch)
			convey.So(got, convey.ShouldResemble, expect)
		}
	})

	// Ensure all SQL queries were executed.
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
prometheus-mysqld-exporter-0.9.0+ds/collector/collector.go000066400000000000000000000030611277522210700240020ustar00rootroot00000000000000
package collector

import (
	"bytes"
	"database/sql"
	"regexp"
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
)

const (
	// Exporter namespace.
	namespace = "mysql"
	// Math constant for picoseconds to seconds.
	picoSeconds = 1e12
	// Query to check whether user/table/client stats are enabled.
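	// The variable name differs between server builds, hence the check for
	// both `userstat` and `userstat_running`; the user/client/table
	// statistics collectors can use this query to skip scraping when the
	// feature is off.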
userstatCheckQuery = `SHOW VARIABLES WHERE Variable_Name='userstat' OR Variable_Name='userstat_running'` ) var logRE = regexp.MustCompile(`.+\.(\d+)$`) func newDesc(subsystem, name, help string) *prometheus.Desc { return prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, name), help, nil, nil, ) } func parseStatus(data sql.RawBytes) (float64, bool) { if bytes.Compare(data, []byte("Yes")) == 0 || bytes.Compare(data, []byte("ON")) == 0 { return 1, true } if bytes.Compare(data, []byte("No")) == 0 || bytes.Compare(data, []byte("OFF")) == 0 { return 0, true } // SHOW SLAVE STATUS Slave_IO_Running can return "Connecting" which is a non-running state. if bytes.Compare(data, []byte("Connecting")) == 0 { return 0, true } // SHOW GLOBAL STATUS like 'wsrep_cluster_status' can return "Primary" or "Non-Primary"/"Disconnected" if bytes.Compare(data, []byte("Primary")) == 0 { return 1, true } if bytes.Compare(data, []byte("Non-Primary")) == 0 || bytes.Compare(data, []byte("Disconnected")) == 0 { return 0, true } if logNum := logRE.Find(data); logNum != nil { value, err := strconv.ParseFloat(string(logNum), 64) return value, err == nil } value, err := strconv.ParseFloat(string(data), 64) return value, err == nil } prometheus-mysqld-exporter-0.9.0+ds/collector/collector_test.go000066400000000000000000000021351277522210700250420ustar00rootroot00000000000000package collector import ( "strings" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" ) type labelMap map[string]string type MetricResult struct { labels labelMap value float64 metricType dto.MetricType } func readMetric(m prometheus.Metric) MetricResult { pb := &dto.Metric{} m.Write(pb) labels := make(labelMap, len(pb.Label)) for _, v := range pb.Label { labels[v.GetName()] = v.GetValue() } if pb.Gauge != nil { return MetricResult{labels: labels, value: pb.GetGauge().GetValue(), metricType: dto.MetricType_GAUGE} } if pb.Counter != nil { return MetricResult{labels: labels, value: pb.GetCounter().GetValue(), metricType: dto.MetricType_COUNTER} } if pb.Untyped != nil { return MetricResult{labels: labels, value: pb.GetUntyped().GetValue(), metricType: dto.MetricType_UNTYPED} } panic("Unsupported metric type") } func sanitizeQuery(q string) string { q = strings.Join(strings.Fields(q), " ") q = strings.Replace(q, "(", "\\(", -1) q = strings.Replace(q, ")", "\\)", -1) q = strings.Replace(q, "*", "\\*", -1) return q } prometheus-mysqld-exporter-0.9.0+ds/collector/engine_innodb.go000066400000000000000000000034561277522210700246220ustar00rootroot00000000000000// Scrape `SHOW ENGINE INNODB STATUS`. package collector import ( "database/sql" "regexp" "strconv" "strings" "github.com/prometheus/client_golang/prometheus" ) const ( // Subsystem. innodb = "engine_innodb" // Query. engineInnodbStatusQuery = `SHOW ENGINE INNODB STATUS` ) // ScrapeEngineInnodbStatus scrapes from `SHOW ENGINE INNODB STATUS`. func ScrapeEngineInnodbStatus(db *sql.DB, ch chan<- prometheus.Metric) error { rows, err := db.Query(engineInnodbStatusQuery) if err != nil { return err } defer rows.Close() var typeCol, nameCol, statusCol string // First row should contain the necessary info. If many rows returned then it's unknown case. 
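	// The result set has three columns (Type, Name, Status); the entire
	// monitor output arrives as one free-form text blob in Status, which is
	// then parsed line by line with the regexps below.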
if rows.Next() { if err := rows.Scan(&typeCol, &nameCol, &statusCol); err != nil { return err } } // 0 queries inside InnoDB, 0 queries in queue // 0 read views open inside InnoDB rQueries, _ := regexp.Compile(`(\d+) queries inside InnoDB, (\d+) queries in queue`) rViews, _ := regexp.Compile(`(\d+) read views open inside InnoDB`) for _, line := range strings.Split(statusCol, "\n") { if data := rQueries.FindStringSubmatch(line); data != nil { value, _ := strconv.ParseFloat(data[1], 64) ch <- prometheus.MustNewConstMetric( newDesc(innodb, "queries_inside_innodb", "Queries inside InnoDB."), prometheus.GaugeValue, value, ) value, _ = strconv.ParseFloat(data[2], 64) ch <- prometheus.MustNewConstMetric( newDesc(innodb, "queries_in_queue", "Queries in queue."), prometheus.GaugeValue, value, ) } else if data := rViews.FindStringSubmatch(line); data != nil { value, _ := strconv.ParseFloat(data[1], 64) ch <- prometheus.MustNewConstMetric( newDesc(innodb, "read_views_open_inside_innodb", "Read views open inside InnoDB."), prometheus.GaugeValue, value, ) } } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/engine_innodb_test.go000066400000000000000000000132671277522210700256620ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeEngineInnodbStatus(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() sample := ` ===================================== 2016-09-14 19:04:38 0x7fed21462700 INNODB MONITOR OUTPUT ===================================== Per second averages calculated from the last 30 seconds ----------------- BACKGROUND THREAD ----------------- srv_master_thread loops: 1 srv_active, 0 srv_shutdown, 49166 srv_idle srv_master_thread log flush and writes: 49165 ---------- SEMAPHORES ---------- OS WAIT ARRAY INFO: reservation count 15 OS WAIT ARRAY INFO: signal count 12 RW-shared spins 0, rounds 4, OS waits 2 RW-excl spins 0, rounds 0, OS waits 0 RW-sx spins 0, rounds 0, OS waits 0 Spin rounds per wait: 4.00 RW-shared, 0.00 RW-excl, 0.00 RW-sx ------------ TRANSACTIONS ------------ Trx id counter 67843 Purge done for trx's n:o < 55764 undo n:o < 0 state: running but idle History list length 779 LIST OF TRANSACTIONS FOR EACH SESSION: ---TRANSACTION 422131596298608, not started 0 lock struct(s), heap size 1136, 0 row lock(s) -------- FILE I/O -------- I/O thread 0 state: waiting for completed aio requests (insert buffer thread) I/O thread 1 state: waiting for completed aio requests (log thread) I/O thread 2 state: waiting for completed aio requests (read thread) I/O thread 3 state: waiting for completed aio requests (read thread) I/O thread 4 state: waiting for completed aio requests (read thread) I/O thread 5 state: waiting for completed aio requests (read thread) I/O thread 6 state: waiting for completed aio requests (write thread) I/O thread 7 state: waiting for completed aio requests (write thread) I/O thread 8 state: waiting for completed aio requests (write thread) I/O thread 9 state: waiting for completed aio requests (write thread) Pending normal aio reads: [0, 0, 0, 0] , aio writes: [0, 0, 0, 0] , ibuf aio reads:, log i/o's:, sync i/o's: Pending flushes (fsync) log: 0; buffer pool: 0 512 OS file reads, 57 OS file writes, 8 OS fsyncs 0.00 reads/s, 0 avg bytes/read, 0.00 writes/s, 0.00 fsyncs/s 
------------------------------------- INSERT BUFFER AND ADAPTIVE HASH INDEX ------------------------------------- Ibuf: size 1, free list len 0, seg size 2, 0 merges merged operations: insert 0, delete mark 0, delete 0 discarded operations: insert 0, delete mark 0, delete 0 Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) 0.00 hash searches/s, 0.00 non-hash searches/s --- LOG --- Log sequence number 37771171 Log flushed up to 37771171 Pages flushed up to 37771171 Last checkpoint at 37771162 Max checkpoint age 80826164 Checkpoint age target 78300347 Modified age 0 Checkpoint age 9 0 pending log flushes, 0 pending chkp writes 10 log i/o's done, 0.00 log i/o's/second ---------------------- BUFFER POOL AND MEMORY ---------------------- Total large memory allocated 139722752 Dictionary memory allocated 367821 Internal hash tables (constant factor + variable factor) Adaptive hash index 2252736 (2219072 + 33664) Page hash 139112 (buffer pool 0 only) Dictionary cache 922589 (554768 + 367821) File system 839328 (812272 + 27056) Lock system 334008 (332872 + 1136) Recovery system 0 (0 + 0) Buffer pool size 8191 Buffer pool size, bytes 0 Free buffers 7684 Database pages 507 Old database pages 0 Modified db pages 0 Pending reads 0 Pending writes: LRU 0, flush list 0, single page 0 Pages made young 0, not young 0 0.00 youngs/s, 0.00 non-youngs/s Pages read 473, created 34, written 36 0.00 reads/s, 0.00 creates/s, 0.00 writes/s No buffer pool page gets since the last printout Pages read ahead 0.00/s, evicted without access 0.00/s, Random read ahead 0.00/s LRU len: 507, unzip_LRU len: 0 I/O sum[0]:cur[0], unzip sum[0]:cur[0] -------------- ROW OPERATIONS -------------- 661 queries inside InnoDB, 10 queries in queue 15 read views open inside InnoDB 0 RW transactions active inside InnoDB Process ID=1, Main thread ID=140656308950784, state: sleeping Number of rows inserted 0, updated 0, deleted 0, read 12 0.00 inserts/s, 0.00 updates/s, 0.00 deletes/s, 0.00 reads/s ---------------------------- END OF INNODB MONITOR OUTPUT ============================ ` columns := []string{"Type", "Name", "Status"} rows := sqlmock.NewRows(columns).AddRow("InnoDB", "", sample) mock.ExpectQuery(sanitizeQuery(engineInnodbStatusQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = ScrapeEngineInnodbStatus(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() metricsExpected := []MetricResult{ {labels: labelMap{}, value: 661, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 10, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 15, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range metricsExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expections: %s", err) } } prometheus-mysqld-exporter-0.9.0+ds/collector/engine_tokudb.go000066400000000000000000000024021277522210700246270ustar00rootroot00000000000000// Scrape `SHOW ENGINE TOKUDB STATUS`. 
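// Status names are free-form strings such as
// "ft: promotion: stopped anyway, after locking the child", so
// sanitizeTokudbMetric rewrites them into valid Prometheus metric names
// before each value is exported as an untyped metric.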
package collector import ( "database/sql" "strings" "github.com/prometheus/client_golang/prometheus" ) const ( // Subsystem. tokudb = "engine_tokudb" // Query. engineTokudbStatusQuery = `SHOW ENGINE TOKUDB STATUS` ) func sanitizeTokudbMetric(metricName string) string { replacements := map[string]string{ ">": "", ",": "", ":": "", "(": "", ")": "", " ": "_", "-": "_", "+": "and", "/": "and", } for r := range replacements { metricName = strings.Replace(metricName, r, replacements[r], -1) } return metricName } // ScrapeEngineTokudbStatus scrapes from `SHOW ENGINE TOKUDB STATUS`. func ScrapeEngineTokudbStatus(db *sql.DB, ch chan<- prometheus.Metric) error { tokudbRows, err := db.Query(engineTokudbStatusQuery) if err != nil { return err } defer tokudbRows.Close() var temp, key string var val sql.RawBytes for tokudbRows.Next() { if err := tokudbRows.Scan(&temp, &key, &val); err != nil { return err } key = strings.ToLower(key) if floatVal, ok := parseStatus(val); ok { ch <- prometheus.MustNewConstMetric( newDesc(tokudb, sanitizeTokudbMetric(key), "Generic metric from SHOW ENGINE TOKUDB STATUS."), prometheus.UntypedValue, floatVal, ) } } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/engine_tokudb_test.go000066400000000000000000000051121277522210700256670ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestSanitizeTokudbMetric(t *testing.T) { samples := map[string]string{ "loader: number of calls to loader->close() that failed": "loader_number_of_calls_to_loader_close_that_failed", "ft: promotion: stopped anyway, after locking the child": "ft_promotion_stopped_anyway_after_locking_the_child", "ft: basement nodes deserialized with fixed-keysize": "ft_basement_nodes_deserialized_with_fixed_keysize", "memory: number of bytes used (requested + overhead)": "memory_number_of_bytes_used_requested_and_overhead", "ft: uncompressed / compressed bytes written (overall)": "ft_uncompressed_and_compressed_bytes_written_overall", } convey.Convey("Replacement tests", t, func() { for metric := range samples { got := sanitizeTokudbMetric(metric) convey.So(got, convey.ShouldEqual, samples[metric]) } }) } func TestScrapeEngineTokudbStatus(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"Type", "Name", "Status"} rows := sqlmock.NewRows(columns). AddRow("TokuDB", "indexer: number of calls to indexer->build() succeeded", "1"). AddRow("TokuDB", "ft: promotion: stopped anyway, after locking the child", "45316247"). AddRow("TokuDB", "memory: mallocator version", "3.3.1-0-g9ef9d9e8c271cdf14f664b871a8f98c827714784"). AddRow("TokuDB", "filesystem: most recent disk full", "Thu Jan 1 00:00:00 1970"). 
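		// Rows whose values are not numeric (the mallocator version string and
		// the disk-full timestamp above) fail parseStatus and are dropped, so
		// only three of the five rows appear in metricsExpected below.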
AddRow("TokuDB", "locktree: time spent ending the STO early (seconds)", "9115.904484") mock.ExpectQuery(sanitizeQuery(engineTokudbStatusQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = ScrapeEngineTokudbStatus(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() metricsExpected := []MetricResult{ {labels: labelMap{}, value: 1, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{}, value: 45316247, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{}, value: 9115.904484, metricType: dto.MetricType_UNTYPED}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range metricsExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expections: %s", err) } } prometheus-mysqld-exporter-0.9.0+ds/collector/global_status.go000066400000000000000000000107341277522210700246640ustar00rootroot00000000000000// Scrape `SHOW GLOBAL STATUS`. package collector import ( "database/sql" "regexp" "strings" "github.com/prometheus/client_golang/prometheus" ) const ( // Scrape query globalStatusQuery = `SHOW GLOBAL STATUS` // Subsytem. globalStatus = "global_status" ) // Regexp to match various groups of status vars. var globalStatusRE = regexp.MustCompile(`^(com|handler|connection_errors|innodb_buffer_pool_pages|innodb_rows|performance_schema)_(.*)$`) var ( globalCommandsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "commands_total"), "Total number of executed MySQL commands.", []string{"command"}, nil, ) globalHandlerDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "handlers_total"), "Total number of executed MySQL handlers.", []string{"handler"}, nil, ) globalConnectionErrorsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "connection_errors_total"), "Total number of MySQL connection errors.", []string{"error"}, nil, ) globalBufferPoolPagesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "buffer_pool_pages"), "Innodb buffer pool pages by state.", []string{"state"}, nil, ) globalBufferPoolPageChangesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "buffer_pool_page_changes_total"), "Innodb buffer pool page state changes.", []string{"operation"}, nil, ) globalInnoDBRowOpsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "innodb_row_ops_total"), "Total number of MySQL InnoDB row operations.", []string{"operation"}, nil, ) globalPerformanceSchemaLostDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "performance_schema_lost_total"), "Total number of MySQL instrumentations that could not be loaded or created due to memory constraints.", []string{"instrumentation"}, nil, ) ) // ScrapeGlobalStatus collects from `SHOW GLOBAL STATUS`. func ScrapeGlobalStatus(db *sql.DB, ch chan<- prometheus.Metric) error { globalStatusRows, err := db.Query(globalStatusQuery) if err != nil { return err } defer globalStatusRows.Close() var key string var val sql.RawBytes var textItems = map[string]string{ "wsrep_local_state_uuid": "", "wsrep_cluster_state_uuid": "", "wsrep_provider_version": "", } for globalStatusRows.Next() { if err := globalStatusRows.Scan(&key, &val); err != nil { return err } if floatVal, ok := parseStatus(val); ok { // Unparsable values are silently skipped. 
key = strings.ToLower(key) match := globalStatusRE.FindStringSubmatch(key) if match == nil { ch <- prometheus.MustNewConstMetric( newDesc(globalStatus, key, "Generic metric from SHOW GLOBAL STATUS."), prometheus.UntypedValue, floatVal, ) continue } switch match[1] { case "com": ch <- prometheus.MustNewConstMetric( globalCommandsDesc, prometheus.CounterValue, floatVal, match[2], ) case "handler": ch <- prometheus.MustNewConstMetric( globalHandlerDesc, prometheus.CounterValue, floatVal, match[2], ) case "connection_errors": ch <- prometheus.MustNewConstMetric( globalConnectionErrorsDesc, prometheus.CounterValue, floatVal, match[2], ) case "innodb_buffer_pool_pages": switch match[2] { case "data", "dirty", "free", "misc": ch <- prometheus.MustNewConstMetric( globalBufferPoolPagesDesc, prometheus.GaugeValue, floatVal, match[2], ) default: ch <- prometheus.MustNewConstMetric( globalBufferPoolPageChangesDesc, prometheus.CounterValue, floatVal, match[2], ) } case "innodb_rows": ch <- prometheus.MustNewConstMetric( globalInnoDBRowOpsDesc, prometheus.CounterValue, floatVal, match[2], ) case "performance_schema": ch <- prometheus.MustNewConstMetric( globalPerformanceSchemaLostDesc, prometheus.CounterValue, floatVal, match[2], ) } } else if _, ok := textItems[key]; ok { textItems[key] = string(val) } } // mysql_galera_variables_info metric. if textItems["wsrep_local_state_uuid"] != "" { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc(prometheus.BuildFQName(namespace, "galera", "status_info"), "PXC/Galera status information.", []string{"wsrep_local_state_uuid", "wsrep_cluster_state_uuid", "wsrep_provider_version"}, nil), prometheus.GaugeValue, 1, textItems["wsrep_local_state_uuid"], textItems["wsrep_cluster_state_uuid"], textItems["wsrep_provider_version"], ) } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/global_status_test.go000066400000000000000000000057741277522210700257330ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeGlobalStatus(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"Variable_name", "Value"} rows := sqlmock.NewRows(columns). AddRow("Com_alter_db", "1"). AddRow("Com_show_status", "2"). AddRow("Com_select", "3"). AddRow("Connection_errors_internal", "4"). AddRow("Handler_commit", "5"). AddRow("Innodb_buffer_pool_pages_data", "6"). AddRow("Innodb_buffer_pool_pages_flushed", "7"). AddRow("Innodb_rows_read", "8"). AddRow("Performance_schema_users_lost", "9"). AddRow("Slave_running", "OFF"). AddRow("Ssl_version", ""). AddRow("Uptime", "10"). AddRow("wsrep_cluster_status", "Primary"). AddRow("wsrep_local_state_uuid", "6c06e583-686f-11e6-b9e3-8336ad58138c"). AddRow("wsrep_cluster_state_uuid", "6c06e583-686f-11e6-b9e3-8336ad58138c"). 
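		// wsrep_local_state_uuid, wsrep_cluster_state_uuid and
		// wsrep_provider_version are collected as text, not as numeric
		// metrics; they surface only as labels on the final
		// mysql_galera_status_info gauge in counterExpected below.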
AddRow("wsrep_provider_version", "3.16(r5c765eb)") mock.ExpectQuery(sanitizeQuery(globalStatusQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = ScrapeGlobalStatus(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{"command": "alter_db"}, value: 1, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"command": "show_status"}, value: 2, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"command": "select"}, value: 3, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"error": "internal"}, value: 4, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"handler": "commit"}, value: 5, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"state": "data"}, value: 6, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"operation": "flushed"}, value: 7, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"operation": "read"}, value: 8, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"instrumentation": "users_lost"}, value: 9, metricType: dto.MetricType_COUNTER}, {labels: labelMap{}, value: 0, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{}, value: 10, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{}, value: 1, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{"wsrep_local_state_uuid": "6c06e583-686f-11e6-b9e3-8336ad58138c", "wsrep_cluster_state_uuid": "6c06e583-686f-11e6-b9e3-8336ad58138c", "wsrep_provider_version": "3.16(r5c765eb)"}, value: 1, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expections: %s", err) } } prometheus-mysqld-exporter-0.9.0+ds/collector/global_variables.go000066400000000000000000000036131277522210700253070ustar00rootroot00000000000000// Scrape `SHOW GLOBAL VARIABLES`. package collector import ( "database/sql" "strings" "github.com/prometheus/client_golang/prometheus" ) const ( // Metric subsystem globalVariables = "global_variables" // Metric SQL Queries. globalVariablesQuery = `SHOW GLOBAL VARIABLES` ) // ScrapeGlobalVariables collects from `SHOW GLOBAL VARIABLES`. func ScrapeGlobalVariables(db *sql.DB, ch chan<- prometheus.Metric) error { globalVariablesRows, err := db.Query(globalVariablesQuery) if err != nil { return err } defer globalVariablesRows.Close() var key string var val sql.RawBytes var textItems = map[string]string{ "innodb_version": "", "version": "", "version_comment": "", "wsrep_cluster_name": "", } for globalVariablesRows.Next() { if err := globalVariablesRows.Scan(&key, &val); err != nil { return err } key = strings.ToLower(key) if floatVal, ok := parseStatus(val); ok { ch <- prometheus.MustNewConstMetric( newDesc(globalVariables, key, "Generic gauge metric from SHOW GLOBAL VARIABLES."), prometheus.GaugeValue, floatVal, ) continue } else if _, ok := textItems[key]; ok { textItems[key] = string(val) } } // mysql_version_info metric. ch <- prometheus.MustNewConstMetric( prometheus.NewDesc(prometheus.BuildFQName(namespace, "version", "info"), "MySQL version and distribution.", []string{"innodb_version", "version", "version_comment"}, nil), prometheus.GaugeValue, 1, textItems["innodb_version"], textItems["version"], textItems["version_comment"], ) // mysql_galera_variables_info metric. 
if textItems["wsrep_cluster_name"] != "" { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc(prometheus.BuildFQName(namespace, "galera", "variables_info"), "PXC/Galera variables information.", []string{"wsrep_cluster_name"}, nil), prometheus.GaugeValue, 1, textItems["wsrep_cluster_name"], ) } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/global_variables_test.go000066400000000000000000000045521277522210700263510ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeGlobalVariables(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"Variable_name", "Value"} rows := sqlmock.NewRows(columns). AddRow("wait_timeout", "28800"). AddRow("version_compile_os", "Linux"). AddRow("userstat", "OFF"). AddRow("transaction_prealloc_size", "4096"). AddRow("tx_isolation", "REPEATABLE-READ"). AddRow("tmp_table_size", "16777216"). AddRow("tmpdir", "/tmp"). AddRow("sync_binlog", "0"). AddRow("sync_frm", "ON"). AddRow("slow_launch_time", "2"). AddRow("innodb_version", "5.6.30-76.3"). AddRow("version", "5.6.30-76.3-56"). AddRow("version_comment", "Percona XtraDB Cluster..."). AddRow("wsrep_cluster_name", "supercluster") mock.ExpectQuery(globalVariablesQuery).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = ScrapeGlobalVariables(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{}, value: 28800, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 4096, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 16777216, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 2, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"innodb_version": "5.6.30-76.3", "version": "5.6.30-76.3-56", "version_comment": "Percona XtraDB Cluster..."}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"wsrep_cluster_name": "supercluster"}, value: 1, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expections: %s", err) } } prometheus-mysqld-exporter-0.9.0+ds/collector/info_schema.go000066400000000000000000000001111277522210700242600ustar00rootroot00000000000000package collector // Subsystem. const informationSchema = "info_schema" prometheus-mysqld-exporter-0.9.0+ds/collector/info_schema_auto_increment.go000066400000000000000000000037241277522210700273710ustar00rootroot00000000000000// Scrape auto_increment column information. 
package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) const infoSchemaAutoIncrementQuery = ` SELECT table_schema, table_name, column_name, auto_increment, pow(2, case data_type when 'tinyint' then 7 when 'smallint' then 15 when 'mediumint' then 23 when 'int' then 31 when 'bigint' then 63 end+(column_type like '% unsigned'))-1 as max_int FROM information_schema.tables t JOIN information_schema.columns c USING (table_schema,table_name) WHERE c.extra = 'auto_increment' AND t.auto_increment IS NOT NULL ` var ( globalInfoSchemaAutoIncrementDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "auto_increment_column"), "The current value of an auto_increment column from information_schema.", []string{"schema", "table", "column"}, nil, ) globalInfoSchemaAutoIncrementMaxDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "auto_increment_column_max"), "The max value of an auto_increment column from information_schema.", []string{"schema", "table", "column"}, nil, ) ) // ScrapeAutoIncrementColumns collects auto_increment column information. func ScrapeAutoIncrementColumns(db *sql.DB, ch chan<- prometheus.Metric) error { autoIncrementRows, err := db.Query(infoSchemaAutoIncrementQuery) if err != nil { return err } defer autoIncrementRows.Close() var ( schema, table, column string value, max float64 ) for autoIncrementRows.Next() { if err := autoIncrementRows.Scan( &schema, &table, &column, &value, &max, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( globalInfoSchemaAutoIncrementDesc, prometheus.GaugeValue, value, schema, table, column, ) ch <- prometheus.MustNewConstMetric( globalInfoSchemaAutoIncrementMaxDesc, prometheus.GaugeValue, max, schema, table, column, ) } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/info_schema_clientstats.go000066400000000000000000000236701277522210700267140ustar00rootroot00000000000000// Scrape `information_schema.client_statistics`. package collector import ( "database/sql" "fmt" "strings" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/log" ) const clientStatQuery = `SELECT * FROM information_schema.client_statistics` var ( // Map known client-statistics values to types. Unknown types will be mapped as // untyped. 
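// information_schema.client_statistics is provided by the userstat plugin
// shipped with Percona Server and MariaDB, and its column set varies
// between server versions; that is why unrecognized columns fall back to
// untyped metrics below instead of failing the scrape.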
informationSchemaClientStatisticsTypes = map[string]struct { vtype prometheus.ValueType desc *prometheus.Desc }{ "TOTAL_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_total_connections"), "The number of connections created for this client.", []string{"client"}, nil)}, "CONCURRENT_CONNECTIONS": {prometheus.GaugeValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_concurrent_connections"), "The number of concurrent connections for this client.", []string{"client"}, nil)}, "CONNECTED_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_connected_time_seconds_total"), "The cumulative number of seconds elapsed while there were connections from this client.", []string{"client"}, nil)}, "BUSY_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_busy_seconds_total"), "The cumulative number of seconds there was activity on connections from this client.", []string{"client"}, nil)}, "CPU_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_cpu_time_seconds_total"), "The cumulative CPU time elapsed, in seconds, while servicing this client's connections.", []string{"client"}, nil)}, "BYTES_RECEIVED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_bytes_received_total"), "The number of bytes received from this client’s connections.", []string{"client"}, nil)}, "BYTES_SENT": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_bytes_sent_total"), "The number of bytes sent to this client’s connections.", []string{"client"}, nil)}, "BINLOG_BYTES_WRITTEN": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_binlog_bytes_written_total"), "The number of bytes written to the binary log from this client’s connections.", []string{"client"}, nil)}, "ROWS_READ": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_read_total"), "The number of rows read by this client’s connections.", []string{"client"}, nil)}, "ROWS_SENT": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_sent_total"), "The number of rows sent by this client’s connections.", []string{"client"}, nil)}, "ROWS_DELETED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_deleted_total"), "The number of rows deleted by this client’s connections.", []string{"client"}, nil)}, "ROWS_INSERTED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_inserted_total"), "The number of rows inserted by this client’s connections.", []string{"client"}, nil)}, "ROWS_FETCHED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_fetched_total"), "The number of rows fetched by this client’s connections.", []string{"client"}, nil)}, "ROWS_UPDATED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_updated_total"), "The 
number of rows updated by this client’s connections.", []string{"client"}, nil)}, "TABLE_ROWS_READ": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_table_rows_read_total"), "The number of rows read from tables by this client’s connections. (It may be different from ROWS_FETCHED.)", []string{"client"}, nil)}, "SELECT_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_select_commands_total"), "The number of SELECT commands executed from this client’s connections.", []string{"client"}, nil)}, "UPDATE_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_update_commands_total"), "The number of UPDATE commands executed from this client’s connections.", []string{"client"}, nil)}, "OTHER_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_other_commands_total"), "The number of other commands executed from this client’s connections.", []string{"client"}, nil)}, "COMMIT_TRANSACTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_commit_transactions_total"), "The number of COMMIT commands issued by this client’s connections.", []string{"client"}, nil)}, "ROLLBACK_TRANSACTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rollback_transactions_total"), "The number of ROLLBACK commands issued by this client’s connections.", []string{"client"}, nil)}, "DENIED_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_denied_connections_total"), "The number of connections denied to this client.", []string{"client"}, nil)}, "LOST_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_lost_connections_total"), "The number of this client’s connections that were terminated uncleanly.", []string{"client"}, nil)}, "ACCESS_DENIED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_access_denied_total"), "The number of times this client’s connections issued commands that were denied.", []string{"client"}, nil)}, "EMPTY_QUERIES": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_empty_queries_total"), "The number of times this client’s connections sent empty queries to the server.", []string{"client"}, nil)}, "TOTAL_SSL_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_total_ssl_connections_total"), "The number of times this client’s connections connected using SSL to the server.", []string{"client"}, nil)}, "MAX_STATEMENT_TIME_EXCEEDED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_max_statement_time_exceeded_total"), "The number of times a statement was aborted, because it was executed longer than its MAX_STATEMENT_TIME threshold.", []string{"client"}, nil)}, } ) // ScrapeClientStat collects from `information_schema.client_statistics`. 
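// The scrape is deliberately a no-op when @@userstat cannot be read or is
// OFF: both cases return nil rather than an error, so a stock MySQL server
// without the plugin does not produce scrape failures.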
func ScrapeClientStat(db *sql.DB, ch chan<- prometheus.Metric) error { var varName, varVal string err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal) if err != nil { log.Debugln("Detailed client stats are not available.") return nil } if varVal == "OFF" { log.Debugf("MySQL @@%s is OFF.", varName) return nil } informationSchemaClientStatisticsRows, err := db.Query(clientStatQuery) if err != nil { return err } defer informationSchemaClientStatisticsRows.Close() // The client column is assumed to be column[0], while all other data is assumed to be coerceable to float64. // Because of the client column, clientStatData[0] maps to columnNames[1] when reading off the metrics // (because clientStatScanArgs is mapped as [ &client, &clientStatData[0], &clientStatData[1] ... &clientStatData[n] ]). // To map metrics to names, therefore, we always range over columnNames[1:]. columnNames, err := informationSchemaClientStatisticsRows.Columns() if err != nil { return err } var ( client string // Holds the client name, which should be in column 0. clientStatData = make([]float64, len(columnNames)-1) // 1 less because of the client column. clientStatScanArgs = make([]interface{}, len(columnNames)) ) clientStatScanArgs[0] = &client for i := range clientStatData { clientStatScanArgs[i+1] = &clientStatData[i] } for informationSchemaClientStatisticsRows.Next() { if err := informationSchemaClientStatisticsRows.Scan(clientStatScanArgs...); err != nil { return err } // Loop over column names, and match to scan data. Unknown columns // will be filled with an untyped metric number. We assume that, other than // the client column, we'll only get numbers. for idx, columnName := range columnNames[1:] { if metricType, ok := informationSchemaClientStatisticsTypes[columnName]; ok { ch <- prometheus.MustNewConstMetric(metricType.desc, metricType.vtype, float64(clientStatData[idx]), client) } else { // Unknown metric. Report as untyped. desc := prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, fmt.Sprintf("client_statistics_%s", strings.ToLower(columnName))), fmt.Sprintf("Unsupported metric from column %s", columnName), []string{"client"}, nil) ch <- prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, float64(clientStatData[idx]), client) } } } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/info_schema_clientstats_test.go000066400000000000000000000074061277522210700277500ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeClientStat(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() mock.ExpectQuery(sanitizeQuery(userstatCheckQuery)).WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}). AddRow("userstat", "ON")) columns := []string{"CLIENT", "TOTAL_CONNECTIONS", "CONCURRENT_CONNECTIONS", "CONNECTED_TIME", "BUSY_TIME", "CPU_TIME", "BYTES_RECEIVED", "BYTES_SENT", "BINLOG_BYTES_WRITTEN", "ROWS_READ", "ROWS_SENT", "ROWS_DELETED", "ROWS_INSERTED", "ROWS_UPDATED", "SELECT_COMMANDS", "UPDATE_COMMANDS", "OTHER_COMMANDS", "COMMIT_TRANSACTIONS", "ROLLBACK_TRANSACTIONS", "DENIED_CONNECTIONS", "LOST_CONNECTIONS", "ACCESS_DENIED", "EMPTY_QUERIES"} rows := sqlmock.NewRows(columns).
AddRow("localhost", 1002, 0, 127027, 286, 245, 2565104853, 21090856, 2380108042, 767691, 1764, 8778, 1210741, 0, 1764, 1214416, 293, 2430888, 0, 0, 0, 0, 0) mock.ExpectQuery(sanitizeQuery(clientStatQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = ScrapeClientStat(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"client": "localhost"}, value: 1002, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"client": "localhost"}, value: 127027, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 286, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 245, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 2565104853, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 21090856, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 2380108042, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 767691, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 1764, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 8778, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 1210741, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 1764, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 1214416, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 293, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 2430888, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_COUNTER}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expections: %s", err) } } prometheus-mysqld-exporter-0.9.0+ds/collector/info_schema_innodb_metrics.go000066400000000000000000000101571277522210700273520ustar00rootroot00000000000000// Scrape `information_schema.innodb_metrics`. package collector import ( "database/sql" "regexp" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/log" ) const infoSchemaInnodbMetricsQuery = ` SELECT name, subsystem, type, comment, count FROM information_schema.innodb_metrics WHERE status = 'enabled' ` // Metrics descriptors. 
var ( infoSchemaBufferPageReadTotalDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_metrics_buffer_page_read_total"), "Total number of buffer pages read total.", []string{"type"}, nil, ) infoSchemaBufferPageWrittenTotalDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_metrics_buffer_page_written_total"), "Total number of buffer pages written total.", []string{"type"}, nil, ) infoSchemaBufferPoolPagesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_metrics_buffer_pool_pages"), "Total number of buffer pool pages by state.", []string{"state"}, nil, ) infoSchemaBufferPoolPagesDirtyDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_metrics_buffer_pool_dirty_pages"), "Total number of dirty pages in the buffer pool.", nil, nil, ) ) // Regexp for matching metric aggregations. var ( bufferRE = regexp.MustCompile(`^buffer_(pool_pages)_(.*)$`) bufferPageRE = regexp.MustCompile(`^buffer_page_(read|written)_(.*)$`) ) // ScrapeInnodbMetrics collects from `information_schema.innodb_metrics`. func ScrapeInnodbMetrics(db *sql.DB, ch chan<- prometheus.Metric) error { innodbMetricsRows, err := db.Query(infoSchemaInnodbMetricsQuery) if err != nil { return err } defer innodbMetricsRows.Close() var ( name, subsystem, metricType, comment string value float64 ) for innodbMetricsRows.Next() { if err := innodbMetricsRows.Scan( &name, &subsystem, &metricType, &comment, &value, ); err != nil { return err } // Special handling of the "buffer_page_io" subsystem. if subsystem == "buffer_page_io" { match := bufferPageRE.FindStringSubmatch(name) if len(match) != 3 { log.Warnln("innodb_metrics subsystem buffer_page_io returned an invalid name:", name) continue } switch match[1] { case "read": ch <- prometheus.MustNewConstMetric( infoSchemaBufferPageReadTotalDesc, prometheus.CounterValue, value, match[2], ) case "written": ch <- prometheus.MustNewConstMetric( infoSchemaBufferPageWrittenTotalDesc, prometheus.CounterValue, value, match[2], ) } continue } if subsystem == "buffer" { match := bufferRE.FindStringSubmatch(name) // Many buffer subsystem metrics are not matched, fall through to generic metric. if match != nil { switch match[1] { case "pool_pages": switch match[2] { case "total": // Ignore total, it is an aggregation of the rest. continue case "dirty": // Dirty pages are a separate metric, not in the total. ch <- prometheus.MustNewConstMetric( infoSchemaBufferPoolPagesDirtyDesc, prometheus.GaugeValue, value, ) default: ch <- prometheus.MustNewConstMetric( infoSchemaBufferPoolPagesDesc, prometheus.GaugeValue, value, match[2], ) } } continue } } metricName := "innodb_metrics_" + subsystem + "_" + name // MySQL returns counters named two different ways. 
"counter" and "status_counter" // value >= 0 is necessary due to upstream bugs: http://bugs.mysql.com/bug.php?id=75966 if (metricType == "counter" || metricType == "status_counter") && value >= 0 { description := prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, metricName+"_total"), comment, nil, nil, ) ch <- prometheus.MustNewConstMetric( description, prometheus.CounterValue, value, ) } else { description := prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, metricName), comment, nil, nil, ) ch <- prometheus.MustNewConstMetric( description, prometheus.GaugeValue, value, ) } } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/info_schema_innodb_metrics_test.go000066400000000000000000000051711277522210700304110ustar00rootroot00000000000000package collector import ( "flag" "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeInnodbMetrics(t *testing.T) { // Suppress a log messages err := flag.Set("log.level", "fatal") if err != nil { t.Fatal(err) } db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"name", "subsystem", "type", "comment", "count"} rows := sqlmock.NewRows(columns). AddRow("lock_timeouts", "lock", "counter", "Number of lock timeouts", 0). AddRow("buffer_pool_reads", "buffer", "status_counter", "Number of reads directly from disk (innodb_buffer_pool_reads)", 1). AddRow("buffer_pool_size", "server", "value", "Server buffer pool size (all buffer pools) in bytes", 2). AddRow("buffer_page_read_system_page", "buffer_page_io", "counter", "Number of System Pages read", 3). AddRow("buffer_page_written_undo_log", "buffer_page_io", "counter", "Number of Undo Log Pages written", 4). AddRow("buffer_pool_pages_dirty", "buffer", "gauge", "Number of dirt buffer pool pages", 5). AddRow("buffer_pool_pages_data", "buffer", "gauge", "Number of data buffer pool pages", 6). AddRow("buffer_pool_pages_total", "buffer", "gauge", "Number of total buffer pool pages", 7). 
AddRow("NOPE", "buffer_page_io", "counter", "An invalid buffer_page_io metric", 999) mock.ExpectQuery(sanitizeQuery(infoSchemaInnodbMetricsQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = ScrapeInnodbMetrics(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() metricExpected := []MetricResult{ {labels: labelMap{}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{}, value: 1, metricType: dto.MetricType_COUNTER}, {labels: labelMap{}, value: 2, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"type": "system_page"}, value: 3, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"type": "undo_log"}, value: 4, metricType: dto.MetricType_COUNTER}, {labels: labelMap{}, value: 5, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"state": "data"}, value: 6, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range metricExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expections: %s", err) } } prometheus-mysqld-exporter-0.9.0+ds/collector/info_schema_innodb_sys_tablespaces.go000066400000000000000000000046741277522210700310770ustar00rootroot00000000000000// Scrape `information_schema.innodb_sys_tablespaces`. package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) const innodbTablespacesQuery = ` SELECT SPACE, NAME, ifnull(FILE_FORMAT, 'NONE') as FILE_FORMAT, ifnull(ROW_FORMAT, 'NONE') as ROW_FORMAT, ifnull(SPACE_TYPE, 'NONE') as SPACE_TYPE, FILE_SIZE, ALLOCATED_SIZE FROM information_schema.innodb_sys_tablespaces ` var ( infoSchemaInnodbTablesspaceInfoDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_tablespace_space_info"), "The Tablespace information and Space ID.", []string{"tablespace_name", "file_format", "row_format", "space_type"}, nil, ) infoSchemaInnodbTablesspaceFileSizeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_tablespace_file_size_bytes"), "The apparent size of the file, which represents the maximum size of the file, uncompressed.", []string{"tablespace_name"}, nil, ) infoSchemaInnodbTablesspaceAllocatedSizeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_tablespace_allocated_size_bytes"), "The actual size of the file, which is the amount of space allocated on disk.", []string{"tablespace_name"}, nil, ) ) // ScrapeInfoSchemaInnodbTablespaces collects from `information_schema.innodb_sys_tablespaces`. 
func ScrapeInfoSchemaInnodbTablespaces(db *sql.DB, ch chan<- prometheus.Metric) error { tablespacesRows, err := db.Query(innodbTablespacesQuery) if err != nil { return err } defer tablespacesRows.Close() var ( tableSpace uint32 tableName string fileFormat string rowFormat string spaceType string fileSize uint64 allocatedSize uint64 ) for tablespacesRows.Next() { err = tablespacesRows.Scan( &tableSpace, &tableName, &fileFormat, &rowFormat, &spaceType, &fileSize, &allocatedSize, ) if err != nil { return err } ch <- prometheus.MustNewConstMetric( infoSchemaInnodbTablesspaceInfoDesc, prometheus.GaugeValue, float64(tableSpace), tableName, fileFormat, rowFormat, spaceType, ) ch <- prometheus.MustNewConstMetric( infoSchemaInnodbTablesspaceFileSizeDesc, prometheus.GaugeValue, float64(fileSize), tableName, ) ch <- prometheus.MustNewConstMetric( infoSchemaInnodbTablesspaceAllocatedSizeDesc, prometheus.GaugeValue, float64(allocatedSize), tableName, ) } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/info_schema_innodb_sys_tablespaces_test.go000066400000000000000000000041061277522210700321240ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeInfoSchemaInnodbTablespaces(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"SPACE", "NAME", "FILE_FORMAT", "ROW_FORMAT", "SPACE_TYPE", "FILE_SIZE", "ALLOCATED_SIZE"} rows := sqlmock.NewRows(columns). AddRow(1, "sys/sys_config", "Barracuda", "Dynamic", "Single", 100, 100). AddRow(2, "db/compressed", "Barracuda", "Compressed", "Single", 300, 200) mock.ExpectQuery(sanitizeQuery(innodbTablespacesQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = ScrapeInfoSchemaInnodbTablespaces(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"tablespace_name": "sys/sys_config", "file_format": "Barracuda", "row_format": "Dynamic", "space_type": "Single"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"tablespace_name": "sys/sys_config"}, value: 100, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"tablespace_name": "sys/sys_config"}, value: 100, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"tablespace_name": "db/compressed", "file_format": "Barracuda", "row_format": "Compressed", "space_type": "Single"}, value: 2, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"tablespace_name": "db/compressed"}, value: 300, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"tablespace_name": "db/compressed"}, value: 200, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expections: %s", err) } } prometheus-mysqld-exporter-0.9.0+ds/collector/info_schema_processlist.go000066400000000000000000000151531277522210700267260ustar00rootroot00000000000000// Scrape `information_schema.processlist`. 
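// Threads are aggregated server-side by (command, state), then each pair is
// normalized onto a fixed whitelist of canonical states via
// deriveThreadState, so the exported per-state series set stays bounded no
// matter how exotic the raw state strings get; anything unrecognized is
// counted under state="other".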
package collector import ( "database/sql" "flag" "fmt" "strings" "github.com/prometheus/client_golang/prometheus" ) const infoSchemaProcesslistQuery = ` SELECT COALESCE(command,''),COALESCE(state,''),count(*),sum(time) FROM information_schema.processlist WHERE ID != connection_id() AND TIME >= %d GROUP BY command,state ORDER BY null ` var ( // Tunable flags. processlistMinTime = flag.Int( "collect.info_schema.processlist.min_time", 0, "Minimum time a thread must be in each state to be counted", ) // Prometheus descriptors. processlistCountDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "threads"), "The number of threads (connections) split by current state.", []string{"state"}, nil) processlistTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "threads_seconds"), "The number of seconds threads (connections) have used split by current state.", []string{"state"}, nil) ) // whitelist for connection/process states in SHOW PROCESSLIST // tokudb uses the state column for "Queried about _______ rows" var ( // TODO: might need some more keys for other MySQL versions or other storage engines // see https://dev.mysql.com/doc/refman/5.7/en/general-thread-states.html threadStateCounterMap = map[string]uint32{ "after create": uint32(0), "altering table": uint32(0), "analyzing": uint32(0), "checking permissions": uint32(0), "checking table": uint32(0), "cleaning up": uint32(0), "closing tables": uint32(0), "converting heap to myisam": uint32(0), "copying to tmp table": uint32(0), "creating sort index": uint32(0), "creating table": uint32(0), "creating tmp table": uint32(0), "deleting": uint32(0), "executing": uint32(0), "execution of init_command": uint32(0), "end": uint32(0), "freeing items": uint32(0), "flushing tables": uint32(0), "fulltext initialization": uint32(0), "idle": uint32(0), "init": uint32(0), "killed": uint32(0), "waiting for lock": uint32(0), "logging slow query": uint32(0), "login": uint32(0), "manage keys": uint32(0), "opening tables": uint32(0), "optimizing": uint32(0), "preparing": uint32(0), "reading from net": uint32(0), "removing duplicates": uint32(0), "removing tmp table": uint32(0), "reopen tables": uint32(0), "repair by sorting": uint32(0), "repair done": uint32(0), "repair with keycache": uint32(0), "replication master": uint32(0), "rolling back": uint32(0), "searching rows for update": uint32(0), "sending data": uint32(0), "sorting for group": uint32(0), "sorting for order": uint32(0), "sorting index": uint32(0), "sorting result": uint32(0), "statistics": uint32(0), "updating": uint32(0), "waiting for tables": uint32(0), "waiting for table flush": uint32(0), "waiting on cond": uint32(0), "writing to net": uint32(0), "other": uint32(0), } threadStateMapping = map[string]string{ "user sleep": "idle", "creating index": "altering table", "committing alter table to storage engine": "altering table", "discard or import tablespace": "altering table", "rename": "altering table", "setup": "altering table", "renaming result table": "altering table", "preparing for alter table": "altering table", "copying to group table": "copying to tmp table", "copy to tmp table": "copying to tmp table", "query end": "end", "update": "updating", "updating main table": "updating", "updating reference tables": "updating", "system lock": "waiting for lock", "user lock": "waiting for lock", "table lock": "waiting for lock", "deleting from main table": "deleting", "deleting from reference tables": "deleting", } ) func 
deriveThreadState(command string, state string) string { var normCmd = strings.Replace(strings.ToLower(command), "_", " ", -1) var normState = strings.Replace(strings.ToLower(state), "_", " ", -1) // check if it's already a valid state _, knownState := threadStateCounterMap[normState] if knownState { return normState } // check if plain mapping applies mappedState, canMap := threadStateMapping[normState] if canMap { return mappedState } // check special waiting for XYZ lock if strings.Contains(normState, "waiting for") && strings.Contains(normState, "lock") { return "waiting for lock" } if normCmd == "sleep" && normState == "" { return "idle" } if normCmd == "query" { return "executing" } if normCmd == "binlog dump" { return "replication master" } return "other" } // ScrapeProcesslist collects from `information_schema.processlist`. func ScrapeProcesslist(db *sql.DB, ch chan<- prometheus.Metric) error { processQuery := fmt.Sprintf( infoSchemaProcesslistQuery, *processlistMinTime, ) processlistRows, err := db.Query(processQuery) if err != nil { return err } defer processlistRows.Close() var ( command string state string count uint32 time uint32 ) stateCounts := make(map[string]uint32, len(threadStateCounterMap)) stateTime := make(map[string]uint32, len(threadStateCounterMap)) for k, v := range threadStateCounterMap { stateCounts[k] = v stateTime[k] = v } for processlistRows.Next() { err = processlistRows.Scan(&command, &state, &count, &time) if err != nil { return err } realState := deriveThreadState(command, state) stateCounts[realState] += count stateTime[realState] += time } for state, count := range stateCounts { ch <- prometheus.MustNewConstMetric(processlistCountDesc, prometheus.GaugeValue, float64(count), state) } for state, time := range stateTime { ch <- prometheus.MustNewConstMetric(processlistTimeDesc, prometheus.GaugeValue, float64(time), state) } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/info_schema_query_response_time.go000066400000000000000000000053011277522210700304470ustar00rootroot00000000000000// Scrape `information_schema.query_response_time`. package collector import ( "database/sql" "fmt" "strconv" "strings" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/log" ) const ( queryResponseCheckQuery = `SELECT @@query_response_time_stats` queryResponseTimeQuery = ` SELECT TIME, COUNT, TOTAL FROM information_schema.query_response_time ` ) var ( infoSchemaQueryResponseTimeCountDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "query_response_time_count"), "The number of queries according to the length of time they took to execute.", []string{}, nil, ) infoSchemaQueryResponseTimeTotalDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "query_response_time_total"), "Total time of queries according to the length of time they took to execute separately.", []string{"le"}, nil, ) ) // ScrapeQueryResponseTime collects from `information_schema.query_response_time`. 
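// Each TIME row is an inclusive upper bound, so per-row COUNTs are summed
// into a cumulative bucket map that feeds MustNewConstHistogram. Worked
// example: rows (1e-6, count 124) and (1e-5, count 179) yield buckets
// {1e-6: 124, 1e-5: 303}. The trailing "TOO LONG" row fails float parsing,
// so it only raises the histogram's overall count, never a bucket.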
func ScrapeQueryResponseTime(db *sql.DB, ch chan<- prometheus.Metric) error { var queryStats uint8 err := db.QueryRow(queryResponseCheckQuery).Scan(&queryStats) if err != nil { log.Debugln("Query response time distribution is not present.") return nil } if queryStats == 0 { log.Debugln("MySQL @@query_response_time_stats is OFF.") return nil } queryDistributionRows, err := db.Query(queryResponseTimeQuery) if err != nil { return err } defer queryDistributionRows.Close() var ( length string count uint64 total string histogramCnt uint64 histogramSum float64 countBuckets = map[float64]uint64{} ) for queryDistributionRows.Next() { err = queryDistributionRows.Scan( &length, &count, &total, ) if err != nil { return err } length, _ := strconv.ParseFloat(strings.TrimSpace(length), 64) total, _ := strconv.ParseFloat(strings.TrimSpace(total), 64) histogramCnt += count histogramSum += total // Special case for "TOO LONG" row where we take into account the count field which is the only available // and do not add it as a part of histogram or metric if length == 0 { continue } countBuckets[length] = histogramCnt // No histogram with query total times because they are float ch <- prometheus.MustNewConstMetric( infoSchemaQueryResponseTimeTotalDesc, prometheus.CounterValue, histogramSum, fmt.Sprintf("%v", length), ) } ch <- prometheus.MustNewConstMetric( infoSchemaQueryResponseTimeTotalDesc, prometheus.CounterValue, histogramSum, "+Inf", ) // Create histogram with query counts ch <- prometheus.MustNewConstHistogram( infoSchemaQueryResponseTimeCountDesc, histogramCnt, histogramSum, countBuckets, ) return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/info_schema_query_response_time_test.go000066400000000000000000000062451277522210700315160ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeQueryResponseTime(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() mock.ExpectQuery(queryResponseCheckQuery).WillReturnRows(sqlmock.NewRows([]string{""}).AddRow(1)) rows := sqlmock.NewRows([]string{"TIME", "COUNT", "TOTAL"}). AddRow(0.000001, 124, 0.000000). AddRow(0.000010, 179, 0.000797). AddRow(0.000100, 2859, 0.107321). AddRow(0.001000, 1085, 0.335395). AddRow(0.010000, 269, 0.522264). AddRow(0.100000, 11, 0.344209). AddRow(1.000000, 1, 0.267369). AddRow(10.000000, 0, 0.000000). AddRow(100.000000, 0, 0.000000). AddRow(1000.000000, 0, 0.000000). AddRow(10000.000000, 0, 0.000000). AddRow(100000.000000, 0, 0.000000). AddRow(1000000.000000, 0, 0.000000). 
AddRow("TOO LONG", 0, "TOO LONG") mock.ExpectQuery(sanitizeQuery(queryResponseTimeQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = ScrapeQueryResponseTime(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() // Test counters expectTimes := []MetricResult{ {labels: labelMap{"le": "1e-06"}, value: 0}, {labels: labelMap{"le": "1e-05"}, value: 0.000797}, {labels: labelMap{"le": "0.0001"}, value: 0.108118}, {labels: labelMap{"le": "0.001"}, value: 0.443513}, {labels: labelMap{"le": "0.01"}, value: 0.9657769999999999}, {labels: labelMap{"le": "0.1"}, value: 1.3099859999999999}, {labels: labelMap{"le": "1"}, value: 1.5773549999999998}, {labels: labelMap{"le": "10"}, value: 1.5773549999999998}, {labels: labelMap{"le": "100"}, value: 1.5773549999999998}, {labels: labelMap{"le": "1000"}, value: 1.5773549999999998}, {labels: labelMap{"le": "10000"}, value: 1.5773549999999998}, {labels: labelMap{"le": "100000"}, value: 1.5773549999999998}, {labels: labelMap{"le": "1e+06"}, value: 1.5773549999999998}, {labels: labelMap{"le": "+Inf"}, value: 1.5773549999999998}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expectTimes { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Test histogram expectCounts := map[float64]uint64{ 1e-06: 124, 1e-05: 303, 0.0001: 3162, 0.001: 4247, 0.01: 4516, 0.1: 4527, 1: 4528, 10: 4528, 100: 4528, 1000: 4528, 10000: 4528, 100000: 4528, 1e+06: 4528, } expectHistogram := prometheus.MustNewConstHistogram(infoSchemaQueryResponseTimeCountDesc, 4528, 1.5773549999999998, expectCounts) expectPb := &dto.Metric{} expectHistogram.Write(expectPb) gotPb := &dto.Metric{} gotHistogram := <-ch // read the last item from channel gotHistogram.Write(gotPb) convey.Convey("Histogram comparison", t, func() { convey.So(expectPb.Histogram, convey.ShouldResemble, gotPb.Histogram) }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expections: %s", err) } } prometheus-mysqld-exporter-0.9.0+ds/collector/info_schema_tables.go000066400000000000000000000074651277522210700256350ustar00rootroot00000000000000// Scrape `information_schema.tables`. 
package collector import ( "database/sql" "flag" "fmt" "strings" "github.com/prometheus/client_golang/prometheus" ) const ( tableSchemaQuery = ` SELECT TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, ifnull(ENGINE, 'NONE') as ENGINE, ifnull(VERSION, '0') as VERSION, ifnull(ROW_FORMAT, 'NONE') as ROW_FORMAT, ifnull(TABLE_ROWS, '0') as TABLE_ROWS, ifnull(DATA_LENGTH, '0') as DATA_LENGTH, ifnull(INDEX_LENGTH, '0') as INDEX_LENGTH, ifnull(DATA_FREE, '0') as DATA_FREE, ifnull(CREATE_OPTIONS, 'NONE') as CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_SCHEMA = '%s' ` dbListQuery = ` SELECT SCHEMA_NAME FROM information_schema.schemata WHERE SCHEMA_NAME NOT IN ('mysql', 'performance_schema', 'information_schema') ` ) var ( tableSchemaDatabases = flag.String( "collect.info_schema.tables.databases", "*", "The list of databases to collect table stats for, or '*' for all", ) infoSchemaTablesVersionDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_version"), "The version number of the table's .frm file", []string{"schema", "table", "type", "engine", "row_format", "create_options"}, nil, ) infoSchemaTablesRowsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_rows"), "The estimated number of rows in the table from information_schema.tables", []string{"schema", "table"}, nil, ) infoSchemaTablesSizeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_size"), "The size of the table components from information_schema.tables", []string{"schema", "table", "component"}, nil, ) ) // ScrapeTableSchema collects from `information_schema.tables`. func ScrapeTableSchema(db *sql.DB, ch chan<- prometheus.Metric) error { var dbList []string if *tableSchemaDatabases == "*" { dbListRows, err := db.Query(dbListQuery) if err != nil { return err } defer dbListRows.Close() var database string for dbListRows.Next() { if err := dbListRows.Scan( &database, ); err != nil { return err } dbList = append(dbList, database) } } else { dbList = strings.Split(*tableSchemaDatabases, ",") } for _, database := range dbList { tableSchemaRows, err := db.Query(fmt.Sprintf(tableSchemaQuery, database)) if err != nil { return err } defer tableSchemaRows.Close() var ( tableSchema string tableName string tableType string engine string version uint64 rowFormat string tableRows uint64 dataLength uint64 indexLength uint64 dataFree uint64 createOptions string ) for tableSchemaRows.Next() { err = tableSchemaRows.Scan( &tableSchema, &tableName, &tableType, &engine, &version, &rowFormat, &tableRows, &dataLength, &indexLength, &dataFree, &createOptions, ) if err != nil { return err } ch <- prometheus.MustNewConstMetric( infoSchemaTablesVersionDesc, prometheus.GaugeValue, float64(version), tableSchema, tableName, tableType, engine, rowFormat, createOptions, ) ch <- prometheus.MustNewConstMetric( infoSchemaTablesRowsDesc, prometheus.GaugeValue, float64(tableRows), tableSchema, tableName, ) ch <- prometheus.MustNewConstMetric( infoSchemaTablesSizeDesc, prometheus.GaugeValue, float64(dataLength), tableSchema, tableName, "data_length", ) ch <- prometheus.MustNewConstMetric( infoSchemaTablesSizeDesc, prometheus.GaugeValue, float64(indexLength), tableSchema, tableName, "index_length", ) ch <- prometheus.MustNewConstMetric( infoSchemaTablesSizeDesc, prometheus.GaugeValue, float64(dataFree), tableSchema, tableName, "data_free", ) } } return nil } 
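// A minimal sketch of how these collectors compose (assumed wiring; the
// real exporter gates each scraper behind its own -collect.* flag). Every
// ScrapeX helper shares the signature
// func(*sql.DB, chan<- prometheus.Metric) error, so fanning them out is a
// loop:
//
//	scrapers := []func(*sql.DB, chan<- prometheus.Metric) error{
//		ScrapeGlobalVariables,
//		ScrapeTableSchema,
//		ScrapeProcesslist,
//	}
//	for _, scrape := range scrapers {
//		if err := scrape(db, ch); err != nil {
//			log.Errorln("scrape failed:", err)
//		}
//	}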
prometheus-mysqld-exporter-0.9.0+ds/collector/info_schema_tablestats.go000066400000000000000000000050401277522210700265140ustar00rootroot00000000000000// Scrape `information_schema.table_statistics`. package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/log" ) const tableStatQuery = ` SELECT TABLE_SCHEMA, TABLE_NAME, ROWS_READ, ROWS_CHANGED, ROWS_CHANGED_X_INDEXES FROM information_schema.table_statistics ` var ( infoSchemaTableStatsRowsReadDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_statistics_rows_read_total"), "The number of rows read from the table.", []string{"schema", "table"}, nil, ) infoSchemaTableStatsRowsChangedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_statistics_rows_changed_total"), "The number of rows changed in the table.", []string{"schema", "table"}, nil, ) infoSchemaTableStatsRowsChangedXIndexesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_statistics_rows_changed_x_indexes_total"), "The number of rows changed in the table, multiplied by the number of indexes changed.", []string{"schema", "table"}, nil, ) ) // ScrapeTableStat collects from `information_schema.table_statistics`. func ScrapeTableStat(db *sql.DB, ch chan<- prometheus.Metric) error { var varName, varVal string err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal) if err != nil { log.Debugln("Detailed table stats are not available.") return nil } if varVal == "OFF" { log.Debugf("MySQL @@%s is OFF.", varName) return nil } informationSchemaTableStatisticsRows, err := db.Query(tableStatQuery) if err != nil { return err } defer informationSchemaTableStatisticsRows.Close() var ( tableSchema string tableName string rowsRead uint64 rowsChanged uint64 rowsChangedXIndexes uint64 ) for informationSchemaTableStatisticsRows.Next() { err = informationSchemaTableStatisticsRows.Scan( &tableSchema, &tableName, &rowsRead, &rowsChanged, &rowsChangedXIndexes, ) if err != nil { return err } ch <- prometheus.MustNewConstMetric( infoSchemaTableStatsRowsReadDesc, prometheus.CounterValue, float64(rowsRead), tableSchema, tableName, ) ch <- prometheus.MustNewConstMetric( infoSchemaTableStatsRowsChangedDesc, prometheus.CounterValue, float64(rowsChanged), tableSchema, tableName, ) ch <- prometheus.MustNewConstMetric( infoSchemaTableStatsRowsChangedXIndexesDesc, prometheus.CounterValue, float64(rowsChangedXIndexes), tableSchema, tableName, ) } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/info_schema_tablestats_test.go000066400000000000000000000037151277522210700275620ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeTableStat(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() mock.ExpectQuery(sanitizeQuery(userstatCheckQuery)).WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}). AddRow("userstat", "ON")) columns := []string{"TABLE_SCHEMA", "TABLE_NAME", "ROWS_READ", "ROWS_CHANGED", "ROWS_CHANGED_X_INDEXES"} rows := sqlmock.NewRows(columns). AddRow("mysql", "db", 238, 0, 8). AddRow("mysql", "proxies_priv", 99, 1, 0). 
AddRow("mysql", "user", 1064, 2, 5) mock.ExpectQuery(sanitizeQuery(tableStatQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = ScrapeTableStat(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"schema": "mysql", "table": "db"}, value: 238}, {labels: labelMap{"schema": "mysql", "table": "db"}, value: 0}, {labels: labelMap{"schema": "mysql", "table": "db"}, value: 8}, {labels: labelMap{"schema": "mysql", "table": "proxies_priv"}, value: 99}, {labels: labelMap{"schema": "mysql", "table": "proxies_priv"}, value: 1}, {labels: labelMap{"schema": "mysql", "table": "proxies_priv"}, value: 0}, {labels: labelMap{"schema": "mysql", "table": "user"}, value: 1064}, {labels: labelMap{"schema": "mysql", "table": "user"}, value: 2}, {labels: labelMap{"schema": "mysql", "table": "user"}, value: 5}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expections: %s", err) } } prometheus-mysqld-exporter-0.9.0+ds/collector/info_schema_userstats.go000066400000000000000000000225761277522210700264200ustar00rootroot00000000000000// Scrape `information_schema.user_statistics`. package collector import ( "database/sql" "fmt" "strings" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/log" ) const userStatQuery = `SELECT * FROM information_schema.user_statistics` var ( // Map known user-statistics values to types. Unknown types will be mapped as // untyped. informationSchemaUserStatisticsTypes = map[string]struct { vtype prometheus.ValueType desc *prometheus.Desc }{ "TOTAL_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_total_connections"), "The number of connections created for this user.", []string{"user"}, nil)}, "CONCURRENT_CONNECTIONS": {prometheus.GaugeValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_concurrent_connections"), "The number of concurrent connections for this user.", []string{"user"}, nil)}, "CONNECTED_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_connected_time_seconds_total"), "The cumulative number of seconds elapsed while there were connections from this user.", []string{"user"}, nil)}, "BUSY_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_busy_seconds_total"), "The cumulative number of seconds there was activity on connections from this user.", []string{"user"}, nil)}, "CPU_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_cpu_time_seconds_total"), "The cumulative CPU time elapsed, in seconds, while servicing this user's connections.", []string{"user"}, nil)}, "BYTES_RECEIVED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_bytes_received_total"), "The number of bytes received from this user’s connections.", []string{"user"}, nil)}, "BYTES_SENT": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_bytes_sent_total"), "The number of bytes sent to 
this user’s connections.", []string{"user"}, nil)}, "BINLOG_BYTES_WRITTEN": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_binlog_bytes_written_total"), "The number of bytes written to the binary log from this user’s connections.", []string{"user"}, nil)}, "ROWS_READ": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_read_total"), "The number of rows read by this user's connections.", []string{"user"}, nil)}, "ROWS_SENT": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_sent_total"), "The number of rows sent by this user's connections.", []string{"user"}, nil)}, "ROWS_DELETED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_deleted_total"), "The number of rows deleted by this user's connections.", []string{"user"}, nil)}, "ROWS_INSERTED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_inserted_total"), "The number of rows inserted by this user's connections.", []string{"user"}, nil)}, "ROWS_FETCHED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_fetched_total"), "The number of rows fetched by this user’s connections.", []string{"user"}, nil)}, "ROWS_UPDATED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_updated_total"), "The number of rows updated by this user’s connections.", []string{"user"}, nil)}, "TABLE_ROWS_READ": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_table_rows_read_total"), "The number of rows read from tables by this user’s connections. 
(It may be different from ROWS_FETCHED.)", []string{"user"}, nil)}, "SELECT_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_select_commands_total"), "The number of SELECT commands executed from this user’s connections.", []string{"user"}, nil)}, "UPDATE_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_update_commands_total"), "The number of UPDATE commands executed from this user’s connections.", []string{"user"}, nil)}, "OTHER_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_other_commands_total"), "The number of other commands executed from this user’s connections.", []string{"user"}, nil)}, "COMMIT_TRANSACTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_commit_transactions_total"), "The number of COMMIT commands issued by this user’s connections.", []string{"user"}, nil)}, "ROLLBACK_TRANSACTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rollback_transactions_total"), "The number of ROLLBACK commands issued by this user’s connections.", []string{"user"}, nil)}, "DENIED_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_denied_connections_total"), "The number of connections denied to this user.", []string{"user"}, nil)}, "LOST_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_lost_connections_total"), "The number of this user’s connections that were terminated uncleanly.", []string{"user"}, nil)}, "ACCESS_DENIED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_access_denied_total"), "The number of times this user’s connections issued commands that were denied.", []string{"user"}, nil)}, "EMPTY_QUERIES": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_empty_queries_total"), "The number of times this user’s connections sent empty queries to the server.", []string{"user"}, nil)}, "TOTAL_SSL_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_total_ssl_connections_total"), "The number of times this user’s connections connected using SSL to the server.", []string{"user"}, nil)}, } ) // ScrapeUserStat collects from `information_schema.user_statistics`. func ScrapeUserStat(db *sql.DB, ch chan<- prometheus.Metric) error { var varName, varVal string err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal) if err != nil { log.Debugln("Detailed user stats are not available.") return nil } if varVal == "OFF" { log.Debugf("MySQL @@%s is OFF.", varName) return nil } informationSchemaUserStatisticsRows, err := db.Query(userStatQuery) if err != nil { return err } defer informationSchemaUserStatisticsRows.Close() // The user column is assumed to be column[0], while all other data is assumed to be coerceable to float64. // Because of the user column, userStatData[0] maps to columnNames[1] when reading off the metrics // (because userStatScanArgs is mapped as [ &user, &userData[0], &userData[1] ... 
&userdata[n] ] // To map metrics to names therefore we always range over columnNames[1:] var columnNames []string columnNames, err = informationSchemaUserStatisticsRows.Columns() if err != nil { return err } var user string // Holds the username, which should be in column 0. var userStatData = make([]float64, len(columnNames)-1) // 1 less because of the user column. var userStatScanArgs = make([]interface{}, len(columnNames)) userStatScanArgs[0] = &user for i := range userStatData { userStatScanArgs[i+1] = &userStatData[i] } for informationSchemaUserStatisticsRows.Next() { err = informationSchemaUserStatisticsRows.Scan(userStatScanArgs...) if err != nil { return err } // Loop over column names, and match to scan data. Unknown columns // will be filled with an untyped metric number. We assume other then // user, that we'll only get numbers. for idx, columnName := range columnNames[1:] { if metricType, ok := informationSchemaUserStatisticsTypes[columnName]; ok { ch <- prometheus.MustNewConstMetric(metricType.desc, metricType.vtype, float64(userStatData[idx]), user) } else { // Unknown metric. Report as untyped. desc := prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, fmt.Sprintf("user_statistics_%s", strings.ToLower(columnName))), fmt.Sprintf("Unsupported metric from column %s", columnName), []string{"user"}, nil) ch <- prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, float64(userStatData[idx]), user) } } } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/info_schema_userstats_test.go000066400000000000000000000073221277522210700274470ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeUserStat(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() mock.ExpectQuery(sanitizeQuery(userstatCheckQuery)).WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}). AddRow("userstat", "ON")) columns := []string{"USER", "TOTAL_CONNECTIONS", "CONCURRENT_CONNECTIONS", "CONNECTED_TIME", "BUSY_TIME", "CPU_TIME", "BYTES_RECEIVED", "BYTES_SENT", "BINLOG_BYTES_WRITTEN", "ROWS_READ", "ROWS_SENT", "ROWS_DELETED", "ROWS_INSERTED", "ROWS_UPDATED", "SELECT_COMMANDS", "UPDATE_COMMANDS", "OTHER_COMMANDS", "COMMIT_TRANSACTIONS", "ROLLBACK_TRANSACTIONS", "DENIED_CONNECTIONS", "LOST_CONNECTIONS", "ACCESS_DENIED", "EMPTY_QUERIES"} rows := sqlmock.NewRows(columns). 
AddRow("user_test", 1002, 0, 127027, 286, 245, 2565104853, 21090856, 2380108042, 767691, 1764, 8778, 1210741, 0, 1764, 1214416, 293, 2430888, 0, 0, 0, 0, 0) mock.ExpectQuery(sanitizeQuery(userStatQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = ScrapeUserStat(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"user": "user_test"}, value: 1002, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"user": "user_test"}, value: 127027, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 286, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 245, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 2565104853, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 21090856, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 2380108042, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 767691, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 1764, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 8778, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 1210741, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 1764, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 1214416, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 293, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 2430888, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } prometheus-mysqld-exporter-0.9.0+ds/collector/perf_schema.go000066400000000000000000000001111277522210700242610ustar00rootroot00000000000000package collector // Subsystem. const performanceSchema = "perf_schema" prometheus-mysqld-exporter-0.9.0+ds/collector/perf_schema_events_statements.go000066400000000000000000000175201277522210700301300ustar00rootroot00000000000000// Scrape `performance_schema.events_statements_summary_by_digest`.
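// Timer columns in events_statements_summary_by_digest are reported in picoseconds; the scraper divides by the shared picoSeconds constant so the exported time metrics are in seconds. // The query below is shaped by three tuning flags: how much normalized digest text to keep, how recent a digest's last_seen must be, and how many of the slowest digests to return.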
package collector import ( "database/sql" "flag" "fmt" "github.com/prometheus/client_golang/prometheus" ) const perfEventsStatementsQuery = ` SELECT ifnull(SCHEMA_NAME, 'NONE') as SCHEMA_NAME, DIGEST, LEFT(DIGEST_TEXT, %d) as DIGEST_TEXT, COUNT_STAR, SUM_TIMER_WAIT, SUM_ERRORS, SUM_WARNINGS, SUM_ROWS_AFFECTED, SUM_ROWS_SENT, SUM_ROWS_EXAMINED, SUM_CREATED_TMP_DISK_TABLES, SUM_CREATED_TMP_TABLES, SUM_SORT_MERGE_PASSES, SUM_SORT_ROWS, SUM_NO_INDEX_USED FROM performance_schema.events_statements_summary_by_digest WHERE SCHEMA_NAME NOT IN ('mysql', 'performance_schema', 'information_schema') AND last_seen > DATE_SUB(NOW(), INTERVAL %d SECOND) ORDER BY SUM_TIMER_WAIT DESC LIMIT %d ` // Tuning flags. var ( perfEventsStatementsLimit = flag.Int( "collect.perf_schema.eventsstatements.limit", 250, "Limit the number of events statements digests by response time", ) perfEventsStatementsTimeLimit = flag.Int( "collect.perf_schema.eventsstatements.timelimit", 86400, "Limit how old the 'last_seen' events statements can be, in seconds", ) perfEventsStatementsDigestTextLimit = flag.Int( "collect.perf_schema.eventsstatements.digest_text_limit", 120, "Maximum length of the normalized statement text", ) ) // Metric descriptors. var ( performanceSchemaEventsStatementsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_total"), "The total count of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_seconds_total"), "The total time of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsErrorsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_errors_total"), "The errors of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsWarningsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_warnings_total"), "The warnings of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsRowsAffectedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_rows_affected_total"), "The total rows affected of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsRowsSentDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_rows_sent_total"), "The total rows sent of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsRowsExaminedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_rows_examined_total"), "The total rows examined of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsTmpTablesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_tmp_tables_total"), "The total tmp tables of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsTmpDiskTablesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_tmp_disk_tables_total"), "The total tmp disk tables of events statements by 
digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsSortMergePassesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sort_merge_passes_total"), "The total number of merge passes by the sort algorithm performed by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsSortRowsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sort_rows_total"), "The total number of sorted rows by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsNoIndexUsedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_no_index_used_total"), "The total number of statements that used full table scans by digest.", []string{"schema", "digest", "digest_text"}, nil, ) ) // ScrapePerfEventsStatements collects from `performance_schema.events_statements_summary_by_digest`. func ScrapePerfEventsStatements(db *sql.DB, ch chan<- prometheus.Metric) error { perfQuery := fmt.Sprintf( perfEventsStatementsQuery, *perfEventsStatementsDigestTextLimit, *perfEventsStatementsTimeLimit, *perfEventsStatementsLimit, ) // Timers here are returned in picoseconds. perfSchemaEventsStatementsRows, err := db.Query(perfQuery) if err != nil { return err } defer perfSchemaEventsStatementsRows.Close() var ( schemaName, digest, digestText string count, queryTime, errors, warnings uint64 rowsAffected, rowsSent, rowsExamined uint64 tmpTables, tmpDiskTables uint64 sortMergePasses, sortRows uint64 noIndexUsed uint64 ) for perfSchemaEventsStatementsRows.Next() { if err := perfSchemaEventsStatementsRows.Scan( &schemaName, &digest, &digestText, &count, &queryTime, &errors, &warnings, &rowsAffected, &rowsSent, &rowsExamined, &tmpTables, &tmpDiskTables, &sortMergePasses, &sortRows, &noIndexUsed, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsDesc, prometheus.CounterValue, float64(count), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsTimeDesc, prometheus.CounterValue, float64(queryTime)/picoSeconds, schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsErrorsDesc, prometheus.CounterValue, float64(errors), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsWarningsDesc, prometheus.CounterValue, float64(warnings), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsRowsAffectedDesc, prometheus.CounterValue, float64(rowsAffected), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsRowsSentDesc, prometheus.CounterValue, float64(rowsSent), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsRowsExaminedDesc, prometheus.CounterValue, float64(rowsExamined), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsTmpTablesDesc, prometheus.CounterValue, float64(tmpTables), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsTmpDiskTablesDesc, prometheus.CounterValue, float64(tmpDiskTables), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSortMergePassesDesc, prometheus.CounterValue, 
float64(sortMergePasses), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSortRowsDesc, prometheus.CounterValue, float64(sortRows), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsNoIndexUsedDesc, prometheus.CounterValue, float64(noIndexUsed), schemaName, digest, digestText, ) } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/perf_schema_events_waits.go000066400000000000000000000032071277522210700270650ustar00rootroot00000000000000// Scrape `performance_schema.events_waits_summary_global_by_event_name`. package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) const perfEventsWaitsQuery = ` SELECT EVENT_NAME, COUNT_STAR, SUM_TIMER_WAIT FROM performance_schema.events_waits_summary_global_by_event_name ` // Metric descriptors. var ( performanceSchemaEventsWaitsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_waits_total"), "The total events waits by event name.", []string{"event_name"}, nil, ) performanceSchemaEventsWaitsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_waits_seconds_total"), "The total seconds of events waits by event name.", []string{"event_name"}, nil, ) ) // ScrapePerfEventsWaits collects from `performance_schema.events_waits_summary_global_by_event_name`. func ScrapePerfEventsWaits(db *sql.DB, ch chan<- prometheus.Metric) error { // Timers here are returned in picoseconds. perfSchemaEventsWaitsRows, err := db.Query(perfEventsWaitsQuery) if err != nil { return err } defer perfSchemaEventsWaitsRows.Close() var ( eventName string count, time uint64 ) for perfSchemaEventsWaitsRows.Next() { if err := perfSchemaEventsWaitsRows.Scan( &eventName, &count, &time, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaEventsWaitsDesc, prometheus.CounterValue, float64(count), eventName, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsWaitsTimeDesc, prometheus.CounterValue, float64(time)/picoSeconds, eventName, ) } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/perf_schema_file_events.go000066400000000000000000000061451277522210700266610ustar00rootroot00000000000000// Scrape `performance_schema.file_summary_by_event_name`. package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) const perfFileEventsQuery = ` SELECT EVENT_NAME, COUNT_READ, SUM_TIMER_READ, SUM_NUMBER_OF_BYTES_READ, COUNT_WRITE, SUM_TIMER_WRITE, SUM_NUMBER_OF_BYTES_WRITE, COUNT_MISC, SUM_TIMER_MISC FROM performance_schema.file_summary_by_event_name ` // Metric descriptors. var ( performanceSchemaFileEventsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "file_events_total"), "The total file events by event name/mode.", []string{"event_name", "mode"}, nil, ) performanceSchemaFileEventsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "file_events_seconds_total"), "The total seconds of file events by event name/mode.", []string{"event_name", "mode"}, nil, ) performanceSchemaFileEventsBytesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "file_events_bytes_total"), "The total bytes of file events by event name/mode.", []string{"event_name", "mode"}, nil, ) ) // ScrapePerfFileEvents collects from `performance_schema.file_summary_by_event_name`. 
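// Each row fans out into read, write and misc samples under a single "mode" label; byte totals are emitted only for read and write, as the summary table carries no byte counter for misc operations.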
func ScrapePerfFileEvents(db *sql.DB, ch chan<- prometheus.Metric) error { // Timers here are returned in picoseconds. perfSchemaFileEventsRows, err := db.Query(perfFileEventsQuery) if err != nil { return err } defer perfSchemaFileEventsRows.Close() var ( eventName string countRead, timeRead, bytesRead uint64 countWrite, timeWrite, bytesWrite uint64 countMisc, timeMisc uint64 ) for perfSchemaFileEventsRows.Next() { if err := perfSchemaFileEventsRows.Scan( &eventName, &countRead, &timeRead, &bytesRead, &countWrite, &timeWrite, &bytesWrite, &countMisc, &timeMisc, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsDesc, prometheus.CounterValue, float64(countRead), eventName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsTimeDesc, prometheus.CounterValue, float64(timeRead)/picoSeconds, eventName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsBytesDesc, prometheus.CounterValue, float64(bytesRead), eventName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsDesc, prometheus.CounterValue, float64(countWrite), eventName, "write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsTimeDesc, prometheus.CounterValue, float64(timeWrite)/picoSeconds, eventName, "write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsBytesDesc, prometheus.CounterValue, float64(bytesWrite), eventName, "write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsDesc, prometheus.CounterValue, float64(countMisc), eventName, "misc", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsTimeDesc, prometheus.CounterValue, float64(timeMisc)/picoSeconds, eventName, "misc", ) } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/perf_schema_index_io_waits.go000066400000000000000000000067741277522210700273670ustar00rootroot00000000000000// Scrape `performance_schema.table_io_waits_summary_by_index_usage`. package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) const perfIndexIOWaitsQuery = ` SELECT OBJECT_SCHEMA, OBJECT_NAME, ifnull(INDEX_NAME, 'NONE') as INDEX_NAME, COUNT_FETCH, COUNT_INSERT, COUNT_UPDATE, COUNT_DELETE, SUM_TIMER_FETCH, SUM_TIMER_INSERT, SUM_TIMER_UPDATE, SUM_TIMER_DELETE FROM performance_schema.table_io_waits_summary_by_index_usage WHERE OBJECT_SCHEMA NOT IN ('mysql', 'performance_schema') ` // Metric descriptors. var ( performanceSchemaIndexWaitsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "index_io_waits_total"), "The total number of index I/O wait events for each index and operation.", []string{"schema", "name", "index", "operation"}, nil, ) performanceSchemaIndexWaitsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "index_io_waits_seconds_total"), "The total time of index I/O wait events for each index and operation.", []string{"schema", "name", "index", "operation"}, nil, ) ) // ScrapePerfIndexIOWaits collects from `performance_schema.table_io_waits_summary_by_index_usage`.
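// Insert activity is not attributed to individual indexes, so insert counts and timers are emitted only for the aggregated INDEX_NAME = 'NONE' rows; fetch, update and delete samples are emitted per index.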
func ScrapePerfIndexIOWaits(db *sql.DB, ch chan<- prometheus.Metric) error { perfSchemaIndexWaitsRows, err := db.Query(perfIndexIOWaitsQuery) if err != nil { return err } defer perfSchemaIndexWaitsRows.Close() var ( objectSchema, objectName, indexName string countFetch, countInsert, countUpdate, countDelete uint64 timeFetch, timeInsert, timeUpdate, timeDelete uint64 ) for perfSchemaIndexWaitsRows.Next() { if err := perfSchemaIndexWaitsRows.Scan( &objectSchema, &objectName, &indexName, &countFetch, &countInsert, &countUpdate, &countDelete, &timeFetch, &timeInsert, &timeUpdate, &timeDelete, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsDesc, prometheus.CounterValue, float64(countFetch), objectSchema, objectName, indexName, "fetch", ) // We only include the insert column when indexName is NONE. if indexName == "NONE" { ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsDesc, prometheus.CounterValue, float64(countInsert), objectSchema, objectName, indexName, "insert", ) } ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsDesc, prometheus.CounterValue, float64(countUpdate), objectSchema, objectName, indexName, "update", ) ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsDesc, prometheus.CounterValue, float64(countDelete), objectSchema, objectName, indexName, "delete", ) ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsTimeDesc, prometheus.CounterValue, float64(timeFetch)/picoSeconds, objectSchema, objectName, indexName, "fetch", ) // We only update write columns when indexName is NONE. if indexName == "NONE" { ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsTimeDesc, prometheus.CounterValue, float64(timeInsert)/picoSeconds, objectSchema, objectName, indexName, "insert", ) } ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsTimeDesc, prometheus.CounterValue, float64(timeUpdate)/picoSeconds, objectSchema, objectName, indexName, "update", ) ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsTimeDesc, prometheus.CounterValue, float64(timeDelete)/picoSeconds, objectSchema, objectName, indexName, "delete", ) } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/perf_schema_index_io_waits_test.go000066400000000000000000000071071277522210700304210ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapePerfIndexIOWaits(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"OBJECT_SCHEMA", "OBJECT_NAME", "INDEX_NAME", "COUNT_FETCH", "COUNT_INSERT", "COUNT_UPDATE", "COUNT_DELETE", "SUM_TIMER_FETCH", "SUM_TIMER_INSERT", "SUM_TIMER_UPDATE", "SUM_TIMER_DELETE"} rows := sqlmock.NewRows(columns). // Note, timers are in picoseconds. AddRow("database", "table", "index", "10", "11", "12", "13", "14000000000000", "15000000000000", "16000000000000", "17000000000000"). 
AddRow("database", "table", "NONE", "20", "21", "22", "23", "24000000000000", "25000000000000", "26000000000000", "27000000000000") mock.ExpectQuery(sanitizeQuery(perfIndexIOWaitsQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = ScrapePerfIndexIOWaits(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() metricExpected := []MetricResult{ {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "fetch"}, value: 10, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "update"}, value: 12, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "delete"}, value: 13, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "fetch"}, value: 14, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "update"}, value: 16, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "delete"}, value: 17, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "fetch"}, value: 20, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "insert"}, value: 21, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "update"}, value: 22, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "delete"}, value: 23, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "fetch"}, value: 24, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "insert"}, value: 25, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "update"}, value: 26, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "delete"}, value: 27, metricType: dto.MetricType_COUNTER}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range metricExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } prometheus-mysqld-exporter-0.9.0+ds/collector/perf_schema_table_io_waits.go000066400000000000000000000062161277522210700273420ustar00rootroot00000000000000// Scrape `performance_schema.table_io_waits_summary_by_table`. package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) const perfTableIOWaitsQuery = ` SELECT OBJECT_SCHEMA, OBJECT_NAME, COUNT_FETCH, COUNT_INSERT, COUNT_UPDATE, COUNT_DELETE, SUM_TIMER_FETCH, SUM_TIMER_INSERT, SUM_TIMER_UPDATE, SUM_TIMER_DELETE FROM performance_schema.table_io_waits_summary_by_table WHERE OBJECT_SCHEMA NOT IN ('mysql', 'performance_schema') ` // Metric descriptors.
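// Both descriptors are labeled by schema, table name and operation (fetch, insert, update, delete); timer values arrive in picoseconds and are converted to seconds on export.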
var ( performanceSchemaTableWaitsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "table_io_waits_total"), "The total number of table I/O wait events for each table and operation.", []string{"schema", "name", "operation"}, nil, ) performanceSchemaTableWaitsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "table_io_waits_seconds_total"), "The total time of table I/O wait events for each table and operation.", []string{"schema", "name", "operation"}, nil, ) ) // ScrapePerfTableIOWaits collects from `performance_schema.table_io_waits_summary_by_table`. func ScrapePerfTableIOWaits(db *sql.DB, ch chan<- prometheus.Metric) error { perfSchemaTableWaitsRows, err := db.Query(perfTableIOWaitsQuery) if err != nil { return err } defer perfSchemaTableWaitsRows.Close() var ( objectSchema, objectName string countFetch, countInsert, countUpdate, countDelete uint64 timeFetch, timeInsert, timeUpdate, timeDelete uint64 ) for perfSchemaTableWaitsRows.Next() { if err := perfSchemaTableWaitsRows.Scan( &objectSchema, &objectName, &countFetch, &countInsert, &countUpdate, &countDelete, &timeFetch, &timeInsert, &timeUpdate, &timeDelete, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsDesc, prometheus.CounterValue, float64(countFetch), objectSchema, objectName, "fetch", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsDesc, prometheus.CounterValue, float64(countInsert), objectSchema, objectName, "insert", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsDesc, prometheus.CounterValue, float64(countUpdate), objectSchema, objectName, "update", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsDesc, prometheus.CounterValue, float64(countDelete), objectSchema, objectName, "delete", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsTimeDesc, prometheus.CounterValue, float64(timeFetch)/picoSeconds, objectSchema, objectName, "fetch", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsTimeDesc, prometheus.CounterValue, float64(timeInsert)/picoSeconds, objectSchema, objectName, "insert", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsTimeDesc, prometheus.CounterValue, float64(timeUpdate)/picoSeconds, objectSchema, objectName, "update", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsTimeDesc, prometheus.CounterValue, float64(timeDelete)/picoSeconds, objectSchema, objectName, "delete", ) } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/perf_schema_table_lock_waits.go000066400000000000000000000173721277522210700276700ustar00rootroot00000000000000// Scrape `performance_schema.table_lock_waits_summary_by_table`. 
package collector import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) const perfTableLockWaitsQuery = ` SELECT OBJECT_SCHEMA, OBJECT_NAME, COUNT_READ_NORMAL, COUNT_READ_WITH_SHARED_LOCKS, COUNT_READ_HIGH_PRIORITY, COUNT_READ_NO_INSERT, COUNT_READ_EXTERNAL, COUNT_WRITE_ALLOW_WRITE, COUNT_WRITE_CONCURRENT_INSERT, COUNT_WRITE_LOW_PRIORITY, COUNT_WRITE_NORMAL, COUNT_WRITE_EXTERNAL, SUM_TIMER_READ_NORMAL, SUM_TIMER_READ_WITH_SHARED_LOCKS, SUM_TIMER_READ_HIGH_PRIORITY, SUM_TIMER_READ_NO_INSERT, SUM_TIMER_READ_EXTERNAL, SUM_TIMER_WRITE_ALLOW_WRITE, SUM_TIMER_WRITE_CONCURRENT_INSERT, SUM_TIMER_WRITE_LOW_PRIORITY, SUM_TIMER_WRITE_NORMAL, SUM_TIMER_WRITE_EXTERNAL FROM performance_schema.table_lock_waits_summary_by_table WHERE OBJECT_SCHEMA NOT IN ('mysql', 'performance_schema', 'information_schema') ` // Metric descriptors. var ( performanceSchemaSQLTableLockWaitsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "sql_lock_waits_total"), "The total number of SQL lock wait events for each table and operation.", []string{"schema", "name", "operation"}, nil, ) performanceSchemaExternalTableLockWaitsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "external_lock_waits_total"), "The total number of external lock wait events for each table and operation.", []string{"schema", "name", "operation"}, nil, ) performanceSchemaSQLTableLockWaitsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "sql_lock_waits_seconds_total"), "The total time of SQL lock wait events for each table and operation.", []string{"schema", "name", "operation"}, nil, ) performanceSchemaExternalTableLockWaitsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "external_lock_waits_seconds_total"), "The total time of external lock wait events for each table and operation.", []string{"schema", "name", "operation"}, nil, ) ) // ScrapePerfTableLockWaits collects from `performance_schema.table_lock_waits_summary_by_table`. 
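// SQL-level lock waits (read_normal, write_allow_write, and so on) and external storage-engine lock waits are exported as separate count and seconds metric families, each labeled by schema, table name and operation.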
func ScrapePerfTableLockWaits(db *sql.DB, ch chan<- prometheus.Metric) error { perfSchemaTableLockWaitsRows, err := db.Query(perfTableLockWaitsQuery) if err != nil { return err } defer perfSchemaTableLockWaitsRows.Close() var ( objectSchema string objectName string countReadNormal uint64 countReadWithSharedLocks uint64 countReadHighPriority uint64 countReadNoInsert uint64 countReadExternal uint64 countWriteAllowWrite uint64 countWriteConcurrentInsert uint64 countWriteLowPriority uint64 countWriteNormal uint64 countWriteExternal uint64 timeReadNormal uint64 timeReadWithSharedLocks uint64 timeReadHighPriority uint64 timeReadNoInsert uint64 timeReadExternal uint64 timeWriteAllowWrite uint64 timeWriteConcurrentInsert uint64 timeWriteLowPriority uint64 timeWriteNormal uint64 timeWriteExternal uint64 ) for perfSchemaTableLockWaitsRows.Next() { if err := perfSchemaTableLockWaitsRows.Scan( &objectSchema, &objectName, &countReadNormal, &countReadWithSharedLocks, &countReadHighPriority, &countReadNoInsert, &countReadExternal, &countWriteAllowWrite, &countWriteConcurrentInsert, &countWriteLowPriority, &countWriteNormal, &countWriteExternal, &timeReadNormal, &timeReadWithSharedLocks, &timeReadHighPriority, &timeReadNoInsert, &timeReadExternal, &timeWriteAllowWrite, &timeWriteConcurrentInsert, &timeWriteLowPriority, &timeWriteNormal, &timeWriteExternal, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countReadNormal), objectSchema, objectName, "read_normal", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countReadWithSharedLocks), objectSchema, objectName, "read_with_shared_locks", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countReadHighPriority), objectSchema, objectName, "read_high_priority", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countReadNoInsert), objectSchema, objectName, "read_no_insert", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countWriteNormal), objectSchema, objectName, "write_normal", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countWriteAllowWrite), objectSchema, objectName, "write_allow_write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countWriteConcurrentInsert), objectSchema, objectName, "write_concurrent_insert", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countWriteLowPriority), objectSchema, objectName, "write_low_priority", ) ch <- prometheus.MustNewConstMetric( performanceSchemaExternalTableLockWaitsDesc, prometheus.CounterValue, float64(countReadExternal), objectSchema, objectName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaExternalTableLockWaitsDesc, prometheus.CounterValue, float64(countWriteExternal), objectSchema, objectName, "write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeReadNormal)/picoSeconds, objectSchema, objectName, "read_normal", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeReadWithSharedLocks)/picoSeconds, objectSchema, objectName, 
"read_with_shared_locks", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeReadHighPriority)/picoSeconds, objectSchema, objectName, "read_high_priority", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeReadNoInsert)/picoSeconds, objectSchema, objectName, "read_no_insert", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeWriteNormal)/picoSeconds, objectSchema, objectName, "write_normal", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeWriteAllowWrite)/picoSeconds, objectSchema, objectName, "write_allow_write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeWriteConcurrentInsert)/picoSeconds, objectSchema, objectName, "write_concurrent_insert", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeWriteLowPriority)/picoSeconds, objectSchema, objectName, "write_low_priority", ) ch <- prometheus.MustNewConstMetric( performanceSchemaExternalTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeReadExternal)/picoSeconds, objectSchema, objectName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaExternalTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeWriteExternal)/picoSeconds, objectSchema, objectName, "write", ) } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/slave_status.go000066400000000000000000000045361277522210700245410ustar00rootroot00000000000000// Scrape `SHOW SLAVE STATUS`. package collector import ( "database/sql" "fmt" "strings" "github.com/prometheus/client_golang/prometheus" ) const ( // Subsystem. slaveStatus = "slave_status" // Query. slaveStatusQuery = `SHOW SLAVE STATUS` ) var slaveStatusQuerySuffixes = [3]string{" NONBLOCKING", " NOLOCK", ""} func columnIndex(slaveCols []string, colName string) int { for idx := range slaveCols { if slaveCols[idx] == colName { return idx } } return -1 } func columnValue(scanArgs []interface{}, slaveCols []string, colName string) string { var columnIndex = columnIndex(slaveCols, colName) if columnIndex == -1 { return "" } return string(*scanArgs[columnIndex].(*sql.RawBytes)) } // ScrapeSlaveStatus collects from `SHOW SLAVE STATUS`. func ScrapeSlaveStatus(db *sql.DB, ch chan<- prometheus.Metric) error { var ( slaveStatusRows *sql.Rows err error ) // Leverage lock-free SHOW SLAVE STATUS by guessing the right suffix for _, suffix := range slaveStatusQuerySuffixes { slaveStatusRows, err = db.Query(fmt.Sprint(slaveStatusQuery, suffix)) if err == nil { break } } if err != nil { return err } defer slaveStatusRows.Close() slaveCols, err := slaveStatusRows.Columns() if err != nil { return err } for slaveStatusRows.Next() { // As the number of columns varies with mysqld versions, // and sql.Scan requires []interface{}, we need to create a // slice of pointers to the elements of slaveData. 
scanArgs := make([]interface{}, len(slaveCols)) for i := range scanArgs { scanArgs[i] = &sql.RawBytes{} } if err := slaveStatusRows.Scan(scanArgs...); err != nil { return err } masterUUID := columnValue(scanArgs, slaveCols, "Master_UUID") masterHost := columnValue(scanArgs, slaveCols, "Master_Host") channelName := columnValue(scanArgs, slaveCols, "Channel_Name") for i, col := range slaveCols { if value, ok := parseStatus(*scanArgs[i].(*sql.RawBytes)); ok { // Silently skip unparsable values. ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, slaveStatus, strings.ToLower(col)), "Generic metric from SHOW SLAVE STATUS.", []string{"master_uuid", "master_host", "channel_name"}, nil, ), prometheus.UntypedValue, value, masterUUID, masterHost, channelName, ) } } } return nil } prometheus-mysqld-exporter-0.9.0+ds/collector/slave_status_test.go000066400000000000000000000033511277522210700255720ustar00rootroot00000000000000package collector import ( "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) func TestScrapeSlaveStatus(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"Master_Host", "Read_Master_Log_Pos", "Slave_IO_Running", "Slave_SQL_Running", "Seconds_Behind_Master"} rows := sqlmock.NewRows(columns). AddRow("127.0.0.1", "1", "Connecting", "Yes", "2") mock.ExpectQuery(sanitizeQuery(slaveStatusQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = ScrapeSlaveStatus(db, ch); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{"channel_name": "", "master_host": "127.0.0.1", "master_uuid": ""}, value: 1, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{"channel_name": "", "master_host": "127.0.0.1", "master_uuid": ""}, value: 0, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{"channel_name": "", "master_host": "127.0.0.1", "master_uuid": ""}, value: 1, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{"channel_name": "", "master_host": "127.0.0.1", "master_uuid": ""}, value: 2, metricType: dto.MetricType_UNTYPED}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } prometheus-mysqld-exporter-0.9.0+ds/example.rules000066400000000000000000000054471277522210700222140ustar00rootroot00000000000000### # Sample prometheus rules/alerts for mysqld. # # NOTE: Please review these carefully as thresholds and behavior may not meet # your SLOs or labels. # ### # Recording Rules # Record slave lag in seconds as a pre-computed timeseries that takes # `mysql_slave_status_sql_delay` into account. mysql_slave_lag_seconds = mysql_slave_status_seconds_behind_master - mysql_slave_status_sql_delay ### # Galera Alerts # Alert: Galera node is not "ready". ALERT MySQLGaleraNotReady IF mysql_global_status_wsrep_ready != 1 FOR 5m LABELS { severity = "warning" } ANNOTATIONS { summary = "Galera cluster node not ready", description = "{{$labels.job}} on {{$labels.instance}} is not ready.", } # Alert: Galera node state is not synced.
ALERT MySQLGaleraOutOfSync IF (mysql_global_status_wsrep_local_state != 4 AND mysql_global_variables_wsrep_desync == 0) FOR 5m LABELS { severity = "warning" } ANNOTATIONS { summary = "Galera cluster node out of sync", description = "{{$labels.job}} on {{$labels.instance}} is not in sync ({{$value}} != 4).", } # Alert: Galera node is in "donor" state, and is behind in applying transactions. ALERT MySQLGaleraDonorFallingBehind IF (mysql_global_status_wsrep_local_state == 2 AND mysql_global_status_wsrep_local_recv_queue > 100) FOR 5m LABELS { severity = "warning" } ANNOTATIONS { summary = "xtradb cluster donor node falling behind", description = "{{$labels.job}} on {{$labels.instance}} is a donor (hotbackup) and is falling behind (queue size {{$value}}).", } ### # Replication Alerts # Alert: The replication IO or SQL threads are stopped. ALERT MySQLReplicationNotRunning IF mysql_slave_status_slave_io_running == 0 OR mysql_slave_status_slave_sql_running == 0 FOR 2m LABELS { severity = "critical" } ANNOTATIONS { summary = "Slave replication is not running", description = "Slave replication (IO or SQL) has been down for more than 2 minutes.", } # Alert: The replication lag is non-zero and is predicted not to recover within # 2 minutes. This allows for a small amount of replication lag. ALERT MySQLReplicationLag IF (mysql_slave_lag_seconds > 30) AND on (instance) (predict_linear(mysql_slave_lag_seconds[5m], 60*2) > 0) FOR 1m LABELS { severity = "critical" } ANNOTATIONS { summary = "MySQL slave replication is lagging", description = "The mysql slave replication has fallen behind and is not recovering", } ### # Performance Alerts # Alert: InnoDB log writes are stalling. ALERT MySQLInnoDBLogWaits IF rate(mysql_global_status_innodb_log_waits[15m]) > 10 LABELS { severity = "warning" } ANNOTATIONS { summary = "MySQL innodb log writes stalling", description = "The innodb logs are waiting for disk at a rate of {{$value}} / second", } prometheus-mysqld-exporter-0.9.0+ds/mysqld_exporter.go000066400000000000000000000350511277522210700232730ustar00rootroot00000000000000package main import ( "database/sql" "flag" "fmt" "net/http" "os" "path" "time" _ "github.com/go-sql-driver/mysql" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/log" "github.com/prometheus/common/version" "gopkg.in/ini.v1" "github.com/prometheus/mysqld_exporter/collector" ) var ( showVersion = flag.Bool( "version", false, "Print version information.", ) listenAddress = flag.String( "web.listen-address", ":9104", "Address to listen on for web interface and telemetry.", ) metricPath = flag.String( "web.telemetry-path", "/metrics", "Path under which to expose metrics.", ) configMycnf = flag.String( "config.my-cnf", path.Join(os.Getenv("HOME"), ".my.cnf"), "Path to .my.cnf file to read MySQL credentials from.", ) slowLogFilter = flag.Bool( "log_slow_filter", false, "Add a log_slow_filter to avoid excessive MySQL slow logging.
NOTE: Not supported by Oracle MySQL.", ) collectProcesslist = flag.Bool( "collect.info_schema.processlist", false, "Collect current thread state counts from the information_schema.processlist", ) collectTableSchema = flag.Bool( "collect.info_schema.tables", true, "Collect metrics from information_schema.tables", ) collectInnodbTablespaces = flag.Bool( "collect.info_schema.innodb_tablespaces", false, "Collect metrics from information_schema.innodb_sys_tablespaces", ) innodbMetrics = flag.Bool( "collect.info_schema.innodb_metrics", false, "Collect metrics from information_schema.innodb_metrics", ) collectGlobalStatus = flag.Bool( "collect.global_status", true, "Collect from SHOW GLOBAL STATUS", ) collectGlobalVariables = flag.Bool( "collect.global_variables", true, "Collect from SHOW GLOBAL VARIABLES", ) collectSlaveStatus = flag.Bool( "collect.slave_status", true, "Collect from SHOW SLAVE STATUS", ) collectAutoIncrementColumns = flag.Bool( "collect.auto_increment.columns", false, "Collect auto_increment columns and max values from information_schema", ) collectBinlogSize = flag.Bool( "collect.binlog_size", false, "Collect the current size of all registered binlog files", ) collectPerfTableIOWaits = flag.Bool( "collect.perf_schema.tableiowaits", false, "Collect metrics from performance_schema.table_io_waits_summary_by_table", ) collectPerfIndexIOWaits = flag.Bool( "collect.perf_schema.indexiowaits", false, "Collect metrics from performance_schema.table_io_waits_summary_by_index_usage", ) collectPerfTableLockWaits = flag.Bool( "collect.perf_schema.tablelocks", false, "Collect metrics from performance_schema.table_lock_waits_summary_by_table", ) collectPerfEventsStatements = flag.Bool( "collect.perf_schema.eventsstatements", false, "Collect metrics from performance_schema.events_statements_summary_by_digest", ) collectPerfEventsWaits = flag.Bool( "collect.perf_schema.eventswaits", false, "Collect metrics from performance_schema.events_waits_summary_global_by_event_name", ) collectPerfFileEvents = flag.Bool( "collect.perf_schema.file_events", false, "Collect metrics from performance_schema.file_summary_by_event_name", ) collectUserStat = flag.Bool("collect.info_schema.userstats", false, "If running with userstat=1, set to true to collect user statistics", ) collectClientStat = flag.Bool("collect.info_schema.clientstats", false, "If running with userstat=1, set to true to collect client statistics", ) collectTableStat = flag.Bool("collect.info_schema.tablestats", false, "If running with userstat=1, set to true to collect table statistics", ) collectQueryResponseTime = flag.Bool("collect.info_schema.query_response_time", false, "Collect query response time distribution if query_response_time_stats is ON.", ) collectEngineTokudbStatus = flag.Bool("collect.engine_tokudb_status", false, "Collect from SHOW ENGINE TOKUDB STATUS", ) collectEngineInnodbStatus = flag.Bool("collect.engine_innodb_status", false, "Collect from SHOW ENGINE INNODB STATUS", ) ) // Metric name parts. const ( // Namespace for all metrics. namespace = "mysql" // Subsystem(s). exporter = "exporter" ) // SQL Queries. const ( sessionSettingsQuery = `SET SESSION log_slow_filter = 'tmp_table_on_disk,filesort_on_disk'` upQuery = `SELECT 1` ) // landingPage contains the HTML served at '/'. // TODO: Make this nicer and more informative. var landingPage = []byte(`<html>
<head><title>MySQLd exporter</title></head>
<body>
<h1>MySQLd exporter</h1>
<p><a href='/metrics'>Metrics</a></p>
</body>
</html>
`) // Exporter collects MySQL metrics. It implements prometheus.Collector. type Exporter struct { dsn string duration, error prometheus.Gauge totalScrapes prometheus.Counter scrapeErrors *prometheus.CounterVec mysqldUp prometheus.Gauge } // NewExporter returns a new MySQL exporter for the provided DSN. func NewExporter(dsn string) *Exporter { return &Exporter{ dsn: dsn, duration: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: exporter, Name: "last_scrape_duration_seconds", Help: "Duration of the last scrape of metrics from MySQL.", }), totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: exporter, Name: "scrapes_total", Help: "Total number of times MySQL was scraped for metrics.", }), scrapeErrors: prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: namespace, Subsystem: exporter, Name: "scrape_errors_total", Help: "Total number of times an error occurred scraping a MySQL.", }, []string{"collector"}), error: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: exporter, Name: "last_scrape_error", Help: "Whether the last scrape of metrics from MySQL resulted in an error (1 for error, 0 for success).", }), mysqldUp: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: "up", Help: "Whether the MySQL server is up.", }), } } // Describe implements prometheus.Collector. func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { // We cannot know in advance what metrics the exporter will generate // from MySQL. So we use the poor man's describe method: Run a collect // and send the descriptors of all the collected metrics. The problem // here is that we need to connect to the MySQL DB. If it is currently // unavailable, the descriptors will be incomplete. Since this is a // stand-alone exporter and not used as a library within other code // implementing additional metrics, the worst that can happen is that we // don't detect inconsistent metrics created by this exporter // itself. Also, a change in the monitored MySQL instance may change the // exported metrics during the runtime of the exporter. metricCh := make(chan prometheus.Metric) doneCh := make(chan struct{}) go func() { for m := range metricCh { ch <- m.Desc() } close(doneCh) }() e.Collect(metricCh) close(metricCh) <-doneCh } // Collect implements prometheus.Collector.
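// Collect runs a full scrape and then emits the exporter's own meta-metrics (last scrape duration, total scrape count, per-collector error counters and the mysql up gauge), so they always reflect the scrape that just completed.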
func (e *Exporter) Collect(ch chan<- prometheus.Metric) { e.scrape(ch) ch <- e.duration ch <- e.totalScrapes ch <- e.error e.scrapeErrors.Collect(ch) ch <- e.mysqldUp } func (e *Exporter) scrape(ch chan<- prometheus.Metric) { e.totalScrapes.Inc() var err error defer func(begun time.Time) { e.duration.Set(time.Since(begun).Seconds()) if err == nil { e.error.Set(0) } else { e.error.Set(1) } }(time.Now()) db, err := sql.Open("mysql", e.dsn) if err != nil { log.Errorln("Error opening connection to database:", err) return } defer db.Close() isUpRows, err := db.Query(upQuery) if err != nil { log.Errorln("Error pinging mysqld:", err) e.mysqldUp.Set(0) return } isUpRows.Close() e.mysqldUp.Set(1) if *slowLogFilter { sessionSettingsRows, err := db.Query(sessionSettingsQuery) if err != nil { log.Errorln("Error setting log_slow_filter:", err) return } sessionSettingsRows.Close() } if *collectGlobalStatus { if err = collector.ScrapeGlobalStatus(db, ch); err != nil { log.Errorln("Error scraping for collect.global_status:", err) e.scrapeErrors.WithLabelValues("collect.global_status").Inc() } } if *collectGlobalVariables { if err = collector.ScrapeGlobalVariables(db, ch); err != nil { log.Errorln("Error scraping for collect.global_variables:", err) e.scrapeErrors.WithLabelValues("collect.global_variables").Inc() } } if *collectSlaveStatus { if err = collector.ScrapeSlaveStatus(db, ch); err != nil { log.Errorln("Error scraping for collect.slave_status:", err) e.scrapeErrors.WithLabelValues("collect.slave_status").Inc() } } if *collectProcesslist { if err = collector.ScrapeProcesslist(db, ch); err != nil { log.Errorln("Error scraping for collect.info_schema.processlist:", err) e.scrapeErrors.WithLabelValues("collect.info_schema.processlist").Inc() } } if *collectTableSchema { if err = collector.ScrapeTableSchema(db, ch); err != nil { log.Errorln("Error scraping for collect.info_schema.tables:", err) e.scrapeErrors.WithLabelValues("collect.info_schema.tables").Inc() } } if *collectInnodbTablespaces { if err = collector.ScrapeInfoSchemaInnodbTablespaces(db, ch); err != nil { log.Errorln("Error scraping for collect.info_schema.innodb_sys_tablespaces:", err) e.scrapeErrors.WithLabelValues("collect.info_schema.innodb_sys_tablespaces").Inc() } } if *innodbMetrics { if err = collector.ScrapeInnodbMetrics(db, ch); err != nil { log.Errorln("Error scraping for collect.info_schema.innodb_metrics:", err) e.scrapeErrors.WithLabelValues("collect.info_schema.innodb_metrics").Inc() } } if *collectAutoIncrementColumns { if err = collector.ScrapeAutoIncrementColumns(db, ch); err != nil { log.Errorln("Error scraping for collect.auto_increment.columns:", err) e.scrapeErrors.WithLabelValues("collect.auto_increment.columns").Inc() } } if *collectBinlogSize { if err = collector.ScrapeBinlogSize(db, ch); err != nil { log.Errorln("Error scraping for collect.binlog_size:", err) e.scrapeErrors.WithLabelValues("collect.binlog_size").Inc() } } if *collectPerfTableIOWaits { if err = collector.ScrapePerfTableIOWaits(db, ch); err != nil { log.Errorln("Error scraping for collect.perf_schema.tableiowaits:", err) e.scrapeErrors.WithLabelValues("collect.perf_schema.tableiowaits").Inc() } } if *collectPerfIndexIOWaits { if err = collector.ScrapePerfIndexIOWaits(db, ch); err != nil { log.Errorln("Error scraping for collect.perf_schema.indexiowaits:", err) e.scrapeErrors.WithLabelValues("collect.perf_schema.indexiowaits").Inc() } } if *collectPerfTableLockWaits { if err = collector.ScrapePerfTableLockWaits(db, ch); err != nil { 
log.Errorln("Error scraping for collect.perf_schema.tablelocks:", err) e.scrapeErrors.WithLabelValues("collect.perf_schema.tablelocks").Inc() } } if *collectPerfEventsStatements { if err = collector.ScrapePerfEventsStatements(db, ch); err != nil { log.Errorln("Error scraping for collect.perf_schema.eventsstatements:", err) e.scrapeErrors.WithLabelValues("collect.perf_schema.eventsstatements").Inc() } } if *collectPerfEventsWaits { if err = collector.ScrapePerfEventsWaits(db, ch); err != nil { log.Errorln("Error scraping for collect.perf_schema.eventswaits:", err) e.scrapeErrors.WithLabelValues("collect.perf_schema.eventswaits").Inc() } } if *collectPerfFileEvents { if err = collector.ScrapePerfFileEvents(db, ch); err != nil { log.Errorln("Error scraping for collect.perf_schema.file_events:", err) e.scrapeErrors.WithLabelValues("collect.perf_schema.file_events").Inc() } } if *collectUserStat { if err = collector.ScrapeUserStat(db, ch); err != nil { log.Errorln("Error scraping for collect.info_schema.userstats:", err) e.scrapeErrors.WithLabelValues("collect.info_schema.userstats").Inc() } } if *collectClientStat { if err = collector.ScrapeClientStat(db, ch); err != nil { log.Errorln("Error scraping for collect.info_schema.clientstats:", err) e.scrapeErrors.WithLabelValues("collect.info_schema.clientstats").Inc() } } if *collectTableStat { if err = collector.ScrapeTableStat(db, ch); err != nil { log.Errorln("Error scraping for collect.info_schema.tablestats:", err) e.scrapeErrors.WithLabelValues("collect.info_schema.tablestats").Inc() } } if *collectQueryResponseTime { if err = collector.ScrapeQueryResponseTime(db, ch); err != nil { log.Errorln("Error scraping for collect.info_schema.query_response_time:", err) e.scrapeErrors.WithLabelValues("collect.info_schema.query_response_time").Inc() } } if *collectEngineTokudbStatus { if err = collector.ScrapeEngineTokudbStatus(db, ch); err != nil { log.Errorln("Error scraping for collect.engine_tokudb_status:", err) e.scrapeErrors.WithLabelValues("collect.engine_tokudb_status").Inc() } } if *collectEngineInnodbStatus { if err = collector.ScrapeEngineInnodbStatus(db, ch); err != nil { log.Errorln("Error scraping for collect.engine_innodb_status:", err) e.scrapeErrors.WithLabelValues("collect.engine_innodb_status").Inc() } } } func parseMycnf(config interface{}) (string, error) { var dsn string cfg, err := ini.Load(config) if err != nil { return dsn, fmt.Errorf("failed reading ini file: %s", err) } user := cfg.Section("client").Key("user").String() password := cfg.Section("client").Key("password").String() if (user == "") || (password == "") { return dsn, fmt.Errorf("no user or password specified under [client] in %s", config) } host := cfg.Section("client").Key("host").MustString("localhost") port := cfg.Section("client").Key("port").MustUint(3306) socket := cfg.Section("client").Key("socket").String() if socket != "" { dsn = fmt.Sprintf("%s:%s@unix(%s)/", user, password, socket) } else { dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/", user, password, host, port) } log.Debugln(dsn) return dsn, nil } func init() { prometheus.MustRegister(version.NewCollector("mysqld_exporter")) } func main() { flag.Parse() if *showVersion { fmt.Fprintln(os.Stdout, version.Print("mysqld_exporter")) os.Exit(0) } log.Infoln("Starting mysqld_exporter", version.Info()) log.Infoln("Build context", version.BuildContext()) dsn := os.Getenv("DATA_SOURCE_NAME") if len(dsn) == 0 { var err error if dsn, err = parseMycnf(*configMycnf); err != nil { log.Fatal(err) } } exporter := 
NewExporter(dsn) prometheus.MustRegister(exporter) http.Handle(*metricPath, prometheus.Handler()) http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { w.Write(landingPage) }) log.Infoln("Listening on", *listenAddress) log.Fatal(http.ListenAndServe(*listenAddress, nil)) } prometheus-mysqld-exporter-0.9.0+ds/mysqld_exporter_test.go000066400000000000000000000046411277522210700243330ustar00rootroot00000000000000package main import ( "testing" "github.com/smartystreets/goconvey/convey" ) func TestParseMycnf(t *testing.T) { const ( tcpConfig = ` [client] user = root password = abc123 ` tcpConfig2 = ` [client] user = root password = abc123 port = 3308 ` socketConfig = ` [client] user = user password = pass socket = /var/lib/mysql/mysql.sock ` socketConfig2 = ` [client] user = dude password = nopassword # host and port will not be used because of socket presence host = 1.2.3.4 port = 3307 socket = /var/lib/mysql/mysql.sock ` remoteConfig = ` [client] user = dude password = nopassword host = 1.2.3.4 port = 3307 ` badConfig = ` [client] user = root ` badConfig2 = ` [client] password = abc123 socket = /var/lib/mysql/mysql.sock ` badConfig3 = ` [hello] world = ismine ` badConfig4 = ` [hello] world ` ) convey.Convey("Various .my.cnf configurations", t, func() { convey.Convey("Local tcp connection", func() { dsn, _ := parseMycnf([]byte(tcpConfig)) convey.So(dsn, convey.ShouldEqual, "root:abc123@tcp(localhost:3306)/") }) convey.Convey("Local tcp connection on non-default port", func() { dsn, _ := parseMycnf([]byte(tcpConfig2)) convey.So(dsn, convey.ShouldEqual, "root:abc123@tcp(localhost:3308)/") }) convey.Convey("Socket connection", func() { dsn, _ := parseMycnf([]byte(socketConfig)) convey.So(dsn, convey.ShouldEqual, "user:pass@unix(/var/lib/mysql/mysql.sock)/") }) convey.Convey("Socket connection ignoring defined host", func() { dsn, _ := parseMycnf([]byte(socketConfig2)) convey.So(dsn, convey.ShouldEqual, "dude:nopassword@unix(/var/lib/mysql/mysql.sock)/") }) convey.Convey("Remote connection", func() { dsn, _ := parseMycnf([]byte(remoteConfig)) convey.So(dsn, convey.ShouldEqual, "dude:nopassword@tcp(1.2.3.4:3307)/") }) convey.Convey("Missed user", func() { _, err := parseMycnf([]byte(badConfig)) convey.So(err, convey.ShouldNotBeNil) }) convey.Convey("Missed password", func() { _, err := parseMycnf([]byte(badConfig2)) convey.So(err, convey.ShouldNotBeNil) }) convey.Convey("No [client] section", func() { _, err := parseMycnf([]byte(badConfig3)) convey.So(err, convey.ShouldNotBeNil) }) convey.Convey("Invalid config", func() { _, err := parseMycnf([]byte(badConfig4)) convey.So(err, convey.ShouldNotBeNil) }) }) }