mysqld_exporter-0.15.0/ (source tree at commit 6ca2a42f97f3403c7788ff4f374430aa267a6b6b)

mysqld_exporter-0.15.0/.circleci/config.yml

---
version: 2.1

orbs:
  prometheus: prometheus/prometheus@0.17.1

executors:
  # Whenever the Go version is updated here, .promu.yml
  # should also be updated.
  golang:
    docker:
      - image: cimg/go:1.20

jobs:
  test:
    executor: golang
    steps:
      - prometheus/setup_environment
      - run: make check_license style staticcheck unused build test-short
      - prometheus/store_artifact:
          file: mysqld_exporter

  integration:
    docker:
      - image: cimg/go:1.20
      - image: << parameters.mysql_image >>
        environment:
          MYSQL_ALLOW_EMPTY_PASSWORD: "yes"
          MYSQL_ROOT_HOST: '%'
    parameters:
      mysql_image:
        type: string
    steps:
      - checkout
      - setup_remote_docker
      - run: docker version
      - run: docker-compose --version
      - run: make build
      - run: make test

  codespell:
    docker:
      - image: cimg/python:3.11
    steps:
      - checkout
      - run: pip install codespell
      - run: codespell --skip=".git,./vendor,ttar,Makefile.common" -L uint,ist,keypair

  mixin:
    executor: golang
    steps:
      - checkout
      - run: go install github.com/monitoring-mixins/mixtool/cmd/mixtool@latest
      - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
      - run: make -C mysqld-mixin lint build

workflows:
  version: 2
  mysqld_exporter:
    jobs:
      - test:
          filters:
            tags:
              only: /.*/
      - integration:
          matrix:
            parameters:
              mysql_image:
                - percona:5.6
                - mysql/mysql-server:5.7.33
                - mysql/mysql-server:8.0
                - mariadb:10.3
                - mariadb:10.4
                - mariadb:10.5
                - mariadb:10.6
                - mariadb:10.11
      - prometheus/build:
          name: build
          parallelism: 3
          promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
          filters:
            tags:
              ignore: /^v.*/
            branches:
              ignore: /^(main|release-.*|.*build-all.*)$/
      - prometheus/build:
          name: build_all
          parallelism: 12
          filters:
            branches:
              only: /^(main|release-.*|.*build-all.*)$/
            tags:
              only: /^v.*/
      - codespell:
          filters:
            tags:
              only: /.*/
      - mixin:
          filters:
            tags:
              only: /.*/
      - prometheus/publish_main:
          context: org-context
          requires:
            - test
            - build_all
          filters:
            branches:
              only: main
      - prometheus/publish_release:
          context: org-context
          requires:
            - test
            - build_all
          filters:
            tags:
              only: /^v.*/
            branches:
              ignore: /.*/

mysqld_exporter-0.15.0/.github/ISSUE_TEMPLATE.md

### Host operating system: output of `uname -a`

### mysqld_exporter version: output of `mysqld_exporter --version`

### MySQL server version

### mysqld_exporter command line flags

### What did you do that produced an error?

### What did you expect to see?

### What did you see instead?
mysqld_exporter-0.15.0/.github/dependabot.yml

version: 2
updates:
  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: "monthly"

mysqld_exporter-0.15.0/.github/workflows/golangci-lint.yml

name: golangci-lint
on:
  push:
    paths:
      - "go.sum"
      - "go.mod"
      - "**.go"
      - "scripts/errcheck_excludes.txt"
      - ".github/workflows/golangci-lint.yml"
      - ".golangci.yml"
  pull_request:

jobs:
  golangci:
    name: lint
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3
      - name: install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.20.x
      - name: Install snmp_exporter/generator dependencies
        run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
        if: github.repository == 'prometheus/snmp_exporter'
      - name: Lint
        uses: golangci/golangci-lint-action@v3.4.0
        with:
          version: v1.51.2

mysqld_exporter-0.15.0/.gitignore

/.build
/mysqld_exporter
/.release
/.tarballs
*.tar.gz
*.test
*-stamp
.idea
*.iml
/vendor

mysqld_exporter-0.15.0/.golangci.yml

---
# Run only staticcheck for now. Additional linters will be enabled one-by-one.
linters:
  enable:
    - staticcheck
  disable-all: true

mysqld_exporter-0.15.0/.promu.yml

go:
  # Whenever the Go version is updated here, .circleci/config.yml should also
  # be updated.
  version: 1.20
repository:
  path: github.com/prometheus/mysqld_exporter
build:
  flags: -a -tags netgo
  ldflags: |
    -X github.com/prometheus/common/version.Version={{.Version}}
    -X github.com/prometheus/common/version.Revision={{.Revision}}
    -X github.com/prometheus/common/version.Branch={{.Branch}}
    -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}}
    -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}}
tarball:
  files:
    - LICENSE
    - NOTICE

mysqld_exporter-0.15.0/.yamllint

---
extends: default

rules:
  braces:
    max-spaces-inside: 1
    level: error
  brackets:
    max-spaces-inside: 1
    level: error
  commas: disable
  comments: disable
  comments-indentation: disable
  document-start: disable
  indentation:
    spaces: consistent
    indent-sequences: consistent
  key-duplicates:
    ignore: |
      config/testdata/section_key_dup.bad.yml
  line-length: disable
  truthy:
    ignore: |
      .github/workflows/*.yml

mysqld_exporter-0.15.0/CHANGELOG.md

## master / unreleased

BREAKING CHANGES:

Changes:
* [CHANGE]
* [FEATURE]
* [ENHANCEMENT]
* [BUGFIX]

## 0.15.0 / 2023-06-16

BREAKING CHANGES:

The exporter no longer supports the monolithic `DATA_SOURCE_NAME` environment variable.
To configure connections to MySQL you can either use a `my.cnf` style config file or command line arguments.

For example:

    export MYSQLD_EXPORTER_PASSWORD=secret
    mysqld_exporter --mysqld.address=localhost:3306 --mysqld.username=exporter

We have also dropped some internal scrape metrics:
- `mysql_exporter_scrapes_total`
- `mysql_exporter_scrape_errors_total`
- `mysql_last_scrape_failed`

The default client configuration file is now `.my.cnf` in the process working directory.
Use `--config.my-cnf="$HOME/.my.cnf"` to retain the previous default.

Changes:
* [CHANGE] Allow `tlsCfg.InsecureSkipVerify` outside of mTLS #631
* [CHANGE] Update to exporter-toolkit v0.8.1 #677
* [CHANGE] Fix shared metrics between requests #722
* [CHANGE] Allow empty passwords #742
* [CHANGE] Don't use HOME env in the my-cnf config path. #745
* [FEATURE] Add support for collecting metrics from `sys.user_summary` #628
* [FEATURE] Support for multi-target mysqld probes #651
* [FEATURE] Add MySQL TLS configurations #718
* [FEATURE] Add config reload via /-/reload #734
* [ENHANCEMENT] Add UNIX domain socket support for multi-target scraping #707
* [ENHANCEMENT] Use `STRAIGHT_JOIN` in infoSchemaAutoIncrementQuery #726
* [BUGFIX] Fix `infoSchemaInnodbMetricsEnabledColumnQuery` #687
* [BUGFIX] Allow empty passwords #742

## 0.14.0 / 2022-01-05

BREAKING CHANGES:

Metric names in the info_schema.processlist collector have been changed. #603
Metric names in the info_schema.replica_host collector have been changed. #496

* [CHANGE] Rewrite processlist collector #603
* [FEATURE] Add collector for `replica_host_status` #496
* [ENHANCEMENT] Expose dates as timestamps from GLOBAL STATUS #561
* [BUGFIX] Fix mysql_slave_hosts_info for mysql 5.5 and mariadb 10.5 #577
* [BUGFIX] Fix logging issues #562 #602

## 0.13.0 / 2021-05-18

BREAKING CHANGES:

Changes related to `replication_group_member_stats` collector:
* metric "transaction_in_queue" was Counter instead of Gauge
* renamed 3 metrics starting with `mysql_perf_schema_transaction_` to start with `mysql_perf_schema_transactions_` to be consistent with column names
* exposing only the server's own stats by matching MEMBER_ID with @@server_uuid, resulting in the "member_id" label being dropped.

Changes:
* [CHANGE] Switch to go-kit for logs. #433
* [FEATURE] Add `tls.insecure-skip-verify` flag to ignore tls verification errors #417
* [FEATURE] Add collector for AWS Aurora information_schema.replica_host_status #435
* [FEATURE] Add collector for `replication_group_members` #459
* [FEATURE] Add new metrics to `replication_group_member_stats` collector to support MySQL 8.x. #462
* [FEATURE] Add collector for `performance_schema.memory_summary_global_by_event_name` #515
* [FEATURE] Support authenticating using mTLS client cert and no password #539
* [FEATURE] Add TLS and basic authentication #522
* [ENHANCEMENT] Support heartbeats in UTC #471
* [ENHANCEMENT] Improve parsing of boolean strings #548
* [BUGFIX] Fix binlog metrics on mysql 8.x #419
* [BUGFIX] Fix output value of wsrep_cluster_status #473
* [BUGFIX] Fix collect.info_schema.innodb_metrics for new field names (mariadb 10.5+) #494
* [BUGFIX] Fix log output of collect[] params #505
* [BUGFIX] Fix collect.info_schema.innodb_tablespaces for new table names #516
* [BUGFIX] Fix innodb_metrics for mariadb 10.5+ #523
* [BUGFIX] Allow perf_schema.memory summary current_bytes to be negative #517

## 0.12.1 / 2019-07-10

### Changes:
* Rebuild to update Docker packages.

## 0.12.0 / 2019-07-10

### BREAKING CHANGES:

The minimum supported MySQL version is now 5.5.

Collector `info_schema.tables` is now disabled by default due to high cardinality danger.
### Changes:
* [CHANGE] Update defaults for MySQL 5.5 #318
* [CHANGE] Update innodb buffer pool mappings #369
* [CHANGE] Disable info_schema.tables collector by default #406
* [BUGFIX] Sanitize metric names in global variables #307
* [BUGFIX] Use GLOBAL to prevent mysql deadlock #336
* [BUGFIX] Clear last_scrape_error on every scrape (PR #368) #367
* [ENHANCEMENT] Add help for some GLOBAL VARIABLES metrics. #326
* [FEATURE] Abort on timeout. #323
* [FEATURE] Add minimal MySQL version to Scraper interface #328
* [FEATURE] Add by_user and by_host metrics to info_schema.processlist collector (PR #333) #334
* [FEATURE] Add wsrep_evs_repl_latency metric collecting. (PR #338)
* [FEATURE] Add collector for mysql.user (PR #341)
* [FEATURE] Add perf_schema.eventsstatementssum collector #347
* [FEATURE] Add collector to get table stats grouped by schema (PR #354)
* [FEATURE] Add replication_applier_status_by_worker metric collecting. (PR #366)

## 0.11.0 / 2018-06-29

### BREAKING CHANGES:
* Flags now use the Kingpin library, and require double-dashes. #222
  This also changes the behavior of boolean flags.
  * Enable: `--collect.global_status`
  * Disable: `--no-collect.global_status`

### Changes:
* [CHANGE] Limit number and lifetime of connections #208
* [ENHANCEMENT] Move session params to DSN #259
* [ENHANCEMENT] Use native DB.Ping() instead of self-written implementation #210
* [FEATURE] Add collector duration metrics #197
* [FEATURE] Add 'collect[]' URL parameter to filter enabled collectors #235
* [FEATURE] Set a `lock_wait_timeout` on the MySQL connection #252
* [FEATURE] Set `last_scrape_error` when an error occurs #237
* [FEATURE] Collect metrics from `performance_schema.replication_group_member_stats` #271
* [FEATURE] Add innodb compression statistic #275
* [FEATURE] Add metrics for the output of `SHOW SLAVE HOSTS` #279
* [FEATURE] Support custom CA truststore and client SSL keypair. #255
* [BUGFIX] Fix perfEventsStatementsQuery #213
* [BUGFIX] Fix `file_instances` metric collector #205
* [BUGFIX] Fix prefix removal in `perf_schema_file_instances` #257
* [BUGFIX] Fix 32bit compile issue #273
* [BUGFIX] Ignore boolean keys in my.cnf. #283

## 0.10.0 / 2017-04-25

### BREAKING CHANGES:
* `mysql_slave_...` metrics now include an additional `connection_name` label to support mariadb multi-source replication. (#178)

### Changes:
* [FEATURE] Add read/write query response time #166
* [FEATURE] Add Galera gcache size metric #169
* [FEATURE] Add MariaDB multi source replication support #178
* [FEATURE] Implement heartbeat metrics #183
* [FEATURE] Add basic `file_summary_by_instance` metrics #189
* [BUGFIX] Workaround MySQL bug 79533 #173

## 0.9.0 / 2016-09-26

### BREAKING CHANGES:
* InnoDB buffer pool page stats have been renamed/fixed to better support aggregations (#130)

### Changes:
* [FEATURE] scrape slave status for multisource replication #134
* [FEATURE] Add client statistics support (+ add tests on users & clients statistics) #138
* [IMPROVEMENT] Consistency of error logging. #144
* [IMPROVEMENT] Add label aggregation for innodb buffer metrics #130
* [IMPROVEMENT] Improved and fixed user/client statistics #149
* [FEATURE] Added the last binlog file number metric. #152
* [MISC] Add an example recording rules file #156
* [FEATURE] Added PXC/Galera info metrics. #155
* [FEATURE] Added metrics from SHOW ENGINE INNODB STATUS. #160
* [IMPROVEMENT] Fix `wsrep_cluster_status` #146

## 0.8.1 / 2016-05-05

### Changes:
* [BUGFIX] Fix `collect.info_schema.innodb_tablespaces` #119
* [BUGFIX] Fix SLAVE STATUS "Connecting" #125
* [MISC] New release process using docker, circleci and a centralized building tool #120
* [MISC] Typos #121

## 0.8.0 / 2016-04-19

### BREAKING CHANGES:
* global status `innodb_buffer_pool_pages` have been renamed/labeled.
* innodb metrics `buffer_page_io` have been renamed/labeled.

### Changes:
* [MISC] Add Travis CI automatic testing.
* [MISC] Refactor `mysqld_exporter.go` into collector package.
* [FEATURE] Add `mysql_up` metric (PR #99)
* [FEATURE] Collect time metrics for processlist (PR #87)
* [CHANGE] Separate `innodb_buffer_pool_pages` status metrics (PR #101)
* [FEATURE] Added metrics from SHOW ENGINE TOKUDB STATUS (PR #103)
* [CHANGE] Add special handling of `buffer_page_io` subsystem (PR #115)
* [FEATURE] Add collector for `innodb_sys_tablespaces` (PR #116)

## 0.7.1 / 2016-02-16

### Changes:
* [IMPROVEMENT] Soft error on collector failure (PR #84)
* [BUGFIX] Fix `innodb_metrics` collector (PR #85)
* [BUGFIX] Parse auto increment values and maximum as float64 (PR #88)

## 0.7.0 / 2016-02-12

### BREAKING CHANGES:
* Global status metrics for "handlers" have been renamed

### Changes:
* [FEATURE] New collector for `information_schema.table_statistics` (PR #57)
* [FEATURE] New server version metric (PR #59)
* [FEATURE] New collector for `information_schema.innodb_metrics` (PR #69)
* [FEATURE] Read credentials from ".my.cnf" files (PR #77)
* [FEATURE] New collector for query response time distribution (PR #79)
* [FEATURE] Add minimum time flag for processlist metrics (PR #82)
* [IMPROVEMENT] Collect more metrics from `performance_schema.events_statements_summary_by_digest` (PR #58)
* [IMPROVEMENT] Add option to filter metrics queries from the slow log (PR #60)
* [IMPROVEMENT] Leverage lock-free SHOW SLAVE STATUS (PR #61)
* [IMPROVEMENT] Add labels to global status "handlers" counters (PR #68)
* [IMPROVEMENT] Update Makefile.COMMON from utils repo (PR #73)
* [BUGFIX] Fix broken error return in the scrape function and log an error (PR #64)
* [BUGFIX] Check `log_bin` before running SHOW BINARY LOGS (PR #74)
* [BUGFIX] Fixed uint for scrapeInnodbMetrics() and gofmt (PR #81)

## 0.6.0 / 2015-10-28

### BREAKING CHANGES:
* The `digest_text` mapping metric has been removed, now included in all digest metrics (PR #50)
* Flags for timing metrics have been removed, now included with related counter flag (PR #48)

### Changes:
* [FEATURE] New collector for metrics from information_schema.processlist (PR #34)
* [FEATURE] New collector for binlog counts/sizes (PR #35)
* [FEATURE] New collector for `performance_schema.{file_summary_by_event_name,events_waits_summary_global_by_event_name}` (PR #49)
* [FEATURE] New collector for `information_schema.tables` (PR #51)
* [IMPROVEMENT] All collection methods now have enable flags (PR #46)
* [IMPROVEMENT] Consolidate `performance_schema` metrics flags (PR #48)
* [IMPROVEMENT] Removed need for `digest_text` mapping metric (PR #50)
* [IMPROVEMENT] Update docs (PR #52)

## 0.5.0 / 2015-09-22

### Changes:
* [FEATURE] Add metrics for table locks
* [BUGFIX] Use uint64 to prevent int64 overflow
* [BUGFIX] Convert picosecond times to correct second values

## 0.4.0 / 2015-09-21

### Changes:
* [CHANGE] Limit `events_statements` to recently used
* [FEATURE] Add `digest_text` mapping metric
* [IMPROVEMENT] General refactoring

## 0.3.0 / 2015-08-31

### BREAKING CHANGES:
* Most metrics have been prefixed with Prometheus subsystem names to avoid conflicts between different collection methods.

### Changes:
* [BUGFIX] Separate `slave_status` and `global_status` into separate subsystems.
* [IMPROVEMENT] Refactor metrics creation.
* [IMPROVEMENT] Add support for `performance_schema.table_io_waits_summary_by_table` collection.
* [IMPROVEMENT] Add support for `performance_schema.table_io_waits_summary_by_index_usage` collection.
* [IMPROVEMENT] Add support for `performance_schema.events_statements_summary_by_digest` collection.
* [IMPROVEMENT] Add support for Percona userstats output collection.
* [IMPROVEMENT] Add support for `auto_increment` column metrics collection.
* [IMPROVEMENT] Add support for `SHOW GLOBAL VARIABLES` metrics collection.

## 0.2.0 / 2015-06-24

### BREAKING CHANGES:
* Logging-related flags have changed. Metric names have changed.

### Changes:
* [IMPROVEMENT] Add Docker support.
* [CHANGE] Switch logging to Prometheus' logging library.
* [BUGFIX] Fix slave status parsing.
* [BUGFIX] Fix truncated numbers.
* [CHANGE] Reorganize metrics names and types.

## 0.1.0 / 2015-05-05

### Initial release

mysqld_exporter-0.15.0/CODE_OF_CONDUCT.md

# Prometheus Community Code of Conduct

Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).

mysqld_exporter-0.15.0/CONTRIBUTING.md

# Contributing

Prometheus uses GitHub to manage reviews of pull requests.

* If you have a trivial fix or improvement, go ahead and create a pull request, addressing (with `@...`) the maintainer of this repository (see [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.

* If you plan to do something more involved, first discuss your ideas on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). This will avoid unnecessary work and surely give you and us a good deal of inspiration.

* Relevant coding style guidelines are the [Go Code Review Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) and the _Formatting and style_ section of Peter Bourgon's [Go: Best Practices for Production Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).

## Local setup

The easiest way to make a local development setup is to use Docker Compose.

```
docker-compose up
make
make test
```

mysqld_exporter-0.15.0/Dockerfile

ARG ARCH="amd64"
ARG OS="linux"
FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest
LABEL maintainer="The Prometheus Authors <prometheus-developers@googlegroups.com>"

ARG ARCH="amd64"
ARG OS="linux"
COPY .build/${OS}-${ARCH}/mysqld_exporter /bin/mysqld_exporter

EXPOSE 9104
USER nobody
ENTRYPOINT [ "/bin/mysqld_exporter" ]

mysqld_exporter-0.15.0/LICENSE

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
mysqld_exporter-0.15.0/MAINTAINERS.md

* Ben Kochie <superq@gmail.com>

mysqld_exporter-0.15.0/Makefile

# Copyright 2015 The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Needs to be defined before including Makefile.common to auto-generate targets
DOCKER_ARCHS ?= amd64 armv7 arm64

all: vet

include Makefile.common

STATICCHECK_IGNORE =

DOCKER_IMAGE_NAME ?= mysqld-exporter

.PHONY: test-docker-single-exporter
test-docker-single-exporter:
	@echo ">> testing docker image for single exporter"
	./test_image.sh "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" 9104

.PHONY: test-docker
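As a usage sketch of the targets above (assuming a working Go toolchain, and Docker for the image targets), bare targets are forwarded to their `common-*` counterparts by the `%: common-%` rule defined in Makefile.common, reproduced below:

```bash
make build    # forwarded to common-build; builds via promu
make test     # forwarded to common-test
make docker   # forwarded to common-docker; expects promu crossbuild output under .build/
```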
mysqld_exporter-0.15.0/Makefile.common

# Copyright 2018 The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A common Makefile that includes rules to be reused in different prometheus projects.
# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!

# Example usage :
# Create the main Makefile in the root project directory.
# include Makefile.common
# customTarget:
# 	@echo ">> Running customTarget"
#

# Ensure GOBIN is not set during build so that promu is installed to the correct path
unexport GOBIN

GO           ?= go
GOFMT        ?= $(GO)fmt
FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
GOOPTS       ?=
GOHOSTOS     ?= $(shell $(GO) env GOHOSTOS)
GOHOSTARCH   ?= $(shell $(GO) env GOHOSTARCH)

GO_VERSION        ?= $(shell $(GO) version)
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
PRE_GO_111        ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')

PROMU := $(FIRST_GOPATH)/bin/promu
pkgs   = ./...

ifeq (arm, $(GOHOSTARCH))
	GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)
	GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)
else
	GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
endif

GOTEST := $(GO) test
GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),)
ifneq ($(shell command -v gotestsum > /dev/null),)
	GOTEST_DIR := test-results
	GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif
endif

PROMU_VERSION ?= 0.14.0
PROMU_URL     := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz

SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.51.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
	ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
		# If we're in CI and there is an Actions file, that means the linter
		# is being run in Actions, so we don't need to run it here.
		ifneq (,$(SKIP_GOLANGCI_LINT))
			GOLANGCI_LINT :=
		else ifeq (,$(CIRCLE_JOB))
			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
		else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
		endif
	endif
endif

PREFIX              ?= $(shell pwd)
BIN_DIR             ?= $(shell pwd)
DOCKER_IMAGE_TAG    ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
DOCKERFILE_PATH     ?= ./Dockerfile
DOCKERBUILD_CONTEXT ?= ./
DOCKER_REPO         ?= prom

DOCKER_ARCHS ?= amd64

BUILD_DOCKER_ARCHS   = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS     = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))

SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))

ifeq ($(GOHOSTARCH),amd64)
	ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
		# Only supported on amd64
		test-flags := -race
	endif
endif

# This rule is used to forward a target like "build" to "common-build". This
# allows a new "build" target to be defined in a Makefile which includes this
# one and override "common-build" without override warnings.
%: common-% ;

.PHONY: common-all
common-all: precheck style check_license lint yamllint unused build test

.PHONY: common-style
common-style:
	@echo ">> checking code style"
	@fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \
	if [ -n "$${fmtRes}" ]; then \
		echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \
		echo "Please ensure you are using $$($(GO) version) for formatting code."; \
		exit 1; \
	fi

.PHONY: common-check_license
common-check_license:
	@echo ">> checking license header"
	@licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
		awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
	done); \
	if [ -n "$${licRes}" ]; then \
		echo "license header checking failed:"; echo "$${licRes}"; \
		exit 1; \
	fi

.PHONY: common-deps
common-deps:
	@echo ">> getting dependencies"
	$(GO) mod download

.PHONY: update-go-deps
update-go-deps:
	@echo ">> updating Go dependencies"
	@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
		$(GO) get -d $$m; \
	done
	$(GO) mod tidy

.PHONY: common-test-short
common-test-short: $(GOTEST_DIR)
	@echo ">> running short tests"
	$(GOTEST) -short $(GOOPTS) $(pkgs)

.PHONY: common-test
common-test: $(GOTEST_DIR)
	@echo ">> running all tests"
	$(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)

$(GOTEST_DIR):
	@mkdir -p $@

.PHONY: common-format
common-format:
	@echo ">> formatting code"
	$(GO) fmt $(pkgs)

.PHONY: common-vet
common-vet:
	@echo ">> vetting code"
	$(GO) vet $(GOOPTS) $(pkgs)

.PHONY: common-lint
common-lint: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
	@echo ">> running golangci-lint"
# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
# Otherwise staticcheck might fail randomly for some reason not yet explained.
	$(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
	$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
endif

.PHONY: common-yamllint
common-yamllint:
	@echo ">> running yamllint on all YAML files in the repository"
ifeq (, $(shell command -v yamllint > /dev/null))
	@echo "yamllint not installed so skipping"
else
	yamllint .
endif

# For backward-compatibility.
.PHONY: common-staticcheck
common-staticcheck: lint

.PHONY: common-unused
common-unused:
	@echo ">> running check for unused/missing packages in go.mod"
	$(GO) mod tidy
	@git diff --exit-code -- go.sum go.mod

.PHONY: common-build
common-build: promu
	@echo ">> building binaries"
	$(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)

.PHONY: common-tarball
common-tarball: promu
	@echo ">> building release tarball"
	$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)

.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
		-f $(DOCKERFILE_PATH) \
		--build-arg ARCH="$*" \
		--build-arg OS="linux" \
		$(DOCKERBUILD_CONTEXT)

.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"

DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"

.PHONY: common-docker-manifest
common-docker-manifest:
	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"

.PHONY: promu
promu: $(PROMU)

$(PROMU):
	$(eval PROMU_TMP := $(shell mktemp -d))
	curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP)
	mkdir -p $(FIRST_GOPATH)/bin
	cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
	rm -r $(PROMU_TMP)

.PHONY: proto
proto:
	@echo ">> generating code from proto files"
	@./scripts/genproto.sh

ifdef GOLANGCI_LINT
$(GOLANGCI_LINT):
	mkdir -p $(FIRST_GOPATH)/bin
	curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \
		| sed -e '/install -d/d' \
		| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
endif

.PHONY: precheck
precheck::

define PRECHECK_COMMAND_template =
precheck:: $(1)_precheck

PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))
.PHONY: $(1)_precheck
$(1)_precheck:
	@if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \
		echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \
		exit 1; \
	fi
endef

mysqld_exporter-0.15.0/NOTICE

Exporter for MySQL daemon.
Copyright 2015 The Prometheus Authors

mysqld_exporter-0.15.0/README.md

# MySQL Server Exporter

[![Build Status](https://travis-ci.org/prometheus/mysqld_exporter.svg)][travis]
[![CircleCI](https://circleci.com/gh/prometheus/mysqld_exporter/tree/main.svg?style=shield)][circleci]
[![Docker Repository on Quay](https://quay.io/repository/prometheus/mysqld-exporter/status)][quay]
[![Docker Pulls](https://img.shields.io/docker/pulls/prom/mysqld-exporter.svg?maxAge=604800)][hub]
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/mysqld_exporter)](https://goreportcard.com/report/github.com/prometheus/mysqld_exporter)

Prometheus exporter for MySQL server metrics.

Supported versions:
* MySQL >= 5.6.
* MariaDB >= 10.3

NOTE: Not all collection methods are supported on MySQL/MariaDB < 5.6.

## Building and running

### Required Grants

```sql
CREATE USER 'exporter'@'localhost' IDENTIFIED BY 'XXXXXXXX' WITH MAX_USER_CONNECTIONS 3;
GRANT PROCESS, REPLICATION CLIENT, SELECT ON *.* TO 'exporter'@'localhost';
```

NOTE: It is recommended to set a max connection limit for the user to avoid overloading the server with monitoring scrapes under heavy load. This is not supported on all MySQL/MariaDB versions; for example, MariaDB 10.1 (provided with Ubuntu 18.04) [does _not_ support this feature](https://mariadb.com/kb/en/library/create-user/#resource-limit-options).

### Build

    make build

### Running

##### Single exporter mode

Running using `.my.cnf` from the current directory:

    ./mysqld_exporter
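For reference, a minimal `.my.cnf` sketch for this mode; the user and password are placeholders matching the Required Grants example above, and other keys (such as the SSL options) are covered later in this README:

```
[client]
user = exporter
password = XXXXXXXX
```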
##### Multi-target support

This exporter supports the multi-target pattern. This allows running a single instance of this exporter for multiple MySQL targets.

To use the multi-target functionality, send an http request to the endpoint `/probe?target=foo:3306` where target is set to the DSN of the MySQL instance to scrape metrics from.

To avoid putting sensitive information like username and password in the URL, you can have multiple configurations in the `config.my-cnf` file and match one by adding `&auth_module=<module>` to the request.

Sample config file for multiple configurations:

    [client]
    user = foo
    password = foo123
    [client.servers]
    user = bar
    password = bar123

On the Prometheus side you can set a scrape config as follows:

    - job_name: mysql # To get metrics about the mysql exporter’s targets
      params:
        # Not required. Will match value to child in config file. Default value is `client`.
        auth_module: [client.servers]
      static_configs:
        - targets:
          # All mysql hostnames or unix sockets to monitor.
          - server1:3306
          - server2:3306
          - unix:///run/mysqld/mysqld.sock
      relabel_configs:
        - source_labels: [__address__]
          target_label: __param_target
        - source_labels: [__param_target]
          target_label: instance
        - target_label: __address__
          # The mysqld_exporter host:port
          replacement: localhost:9104
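As a usage sketch, a single target can also be probed by hand through the multi-target endpoint; the hostnames and the auth module section below are the placeholder values from the sample config above:

```bash
curl 'http://localhost:9104/probe?target=server1:3306&auth_module=client.servers'
```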
##### Flag format

Example format for flags for version > 0.10.0:

    --collect.auto_increment.columns
    --no-collect.auto_increment.columns

Example format for flags for version <= 0.10.0:

    -collect.auto_increment.columns
    -collect.auto_increment.columns=[true|false]

### Collector Flags

Name                                                     | MySQL Version | Description
---------------------------------------------------------|---------------|------------------------------------------------------------------------------------
collect.auto_increment.columns                           | 5.1           | Collect auto_increment columns and max values from information_schema.
collect.binlog_size                                      | 5.1           | Collect the current size of all registered binlog files.
collect.engine_innodb_status                             | 5.1           | Collect from SHOW ENGINE INNODB STATUS.
collect.engine_tokudb_status                             | 5.6           | Collect from SHOW ENGINE TOKUDB STATUS.
collect.global_status                                    | 5.1           | Collect from SHOW GLOBAL STATUS (Enabled by default).
collect.global_variables                                 | 5.1           | Collect from SHOW GLOBAL VARIABLES (Enabled by default).
collect.heartbeat                                        | 5.1           | Collect from [heartbeat](#heartbeat).
collect.heartbeat.database                               | 5.1           | Database from where to collect heartbeat data. (default: heartbeat)
collect.heartbeat.table                                  | 5.1           | Table from where to collect heartbeat data. (default: heartbeat)
collect.heartbeat.utc                                    | 5.1           | Use UTC for timestamps of the current server (`pt-heartbeat` is called with `--utc`). (default: false)
collect.info_schema.clientstats                          | 5.5           | If running with userstat=1, set to true to collect client statistics.
collect.info_schema.innodb_metrics                       | 5.6           | Collect metrics from information_schema.innodb_metrics.
collect.info_schema.innodb_tablespaces                   | 5.7           | Collect metrics from information_schema.innodb_sys_tablespaces.
collect.info_schema.innodb_cmp                           | 5.5           | Collect InnoDB compressed tables metrics from information_schema.innodb_cmp.
collect.info_schema.innodb_cmpmem                        | 5.5           | Collect InnoDB buffer pool compression metrics from information_schema.innodb_cmpmem.
collect.info_schema.processlist                          | 5.1           | Collect thread state counts from information_schema.processlist.
collect.info_schema.processlist.min_time                 | 5.1           | Minimum time a thread must be in each state to be counted. (default: 0)
collect.info_schema.query_response_time                  | 5.5           | Collect query response time distribution if query_response_time_stats is ON.
collect.info_schema.replica_host                         | 5.6           | Collect metrics from information_schema.replica_host_status.
collect.info_schema.tables                               | 5.1           | Collect metrics from information_schema.tables.
collect.info_schema.tables.databases                     | 5.1           | The list of databases to collect table stats for, or '`*`' for all.
collect.info_schema.tablestats                           | 5.1           | If running with userstat=1, set to true to collect table statistics.
collect.info_schema.schemastats                          | 5.1           | If running with userstat=1, set to true to collect schema statistics.
collect.info_schema.userstats                            | 5.1           | If running with userstat=1, set to true to collect user statistics.
collect.mysql.user                                       | 5.5           | Collect data from mysql.user table.
collect.perf_schema.eventsstatements                     | 5.6           | Collect metrics from performance_schema.events_statements_summary_by_digest.
collect.perf_schema.eventsstatements.digest_text_limit   | 5.6           | Maximum length of the normalized statement text. (default: 120)
collect.perf_schema.eventsstatements.limit               | 5.6           | Limit the number of events statements digests by response time. (default: 250)
collect.perf_schema.eventsstatements.timelimit           | 5.6           | Limit how old the 'last_seen' events statements can be, in seconds. (default: 86400)
collect.perf_schema.eventsstatementssum                  | 5.7           | Collect metrics from performance_schema.events_statements_summary_by_digest summed.
collect.perf_schema.eventswaits                          | 5.5           | Collect metrics from performance_schema.events_waits_summary_global_by_event_name.
collect.perf_schema.file_events                          | 5.6           | Collect metrics from performance_schema.file_summary_by_event_name.
collect.perf_schema.file_instances                       | 5.5           | Collect metrics from performance_schema.file_summary_by_instance.
collect.perf_schema.file_instances.remove_prefix         | 5.5           | Remove path prefix in performance_schema.file_summary_by_instance.
collect.perf_schema.indexiowaits                         | 5.6           | Collect metrics from performance_schema.table_io_waits_summary_by_index_usage.
collect.perf_schema.memory_events                        | 5.7           | Collect metrics from performance_schema.memory_summary_global_by_event_name.
collect.perf_schema.memory_events.remove_prefix          | 5.7           | Remove instrument prefix in performance_schema.memory_summary_global_by_event_name.
collect.perf_schema.tableiowaits                         | 5.6           | Collect metrics from performance_schema.table_io_waits_summary_by_table.
collect.perf_schema.tablelocks                           | 5.6           | Collect metrics from performance_schema.table_lock_waits_summary_by_table.
collect.perf_schema.replication_group_members            | 5.7           | Collect metrics from performance_schema.replication_group_members.
collect.perf_schema.replication_group_member_stats       | 5.7           | Collect metrics from performance_schema.replication_group_member_stats.
collect.perf_schema.replication_applier_status_by_worker | 5.7           | Collect metrics from performance_schema.replication_applier_status_by_worker.
collect.slave_status                                     | 5.1           | Collect from SHOW SLAVE STATUS (Enabled by default).
collect.slave_hosts                                      | 5.1           | Collect from SHOW SLAVE HOSTS.
collect.sys.user_summary                                 | 5.7           | Collect metrics from sys.x$user_summary (disabled by default).

### General Flags

Name                        | Description
----------------------------|--------------------------------------------------------------------------------------------------
mysqld.address              | Hostname and port used for connecting to MySQL server, format: `host:port`. (default: `localhost:3306`)
mysqld.username             | Username to be used for connecting to MySQL Server.
config.my-cnf               | Path to .my.cnf file to read MySQL credentials from. (default: `~/.my.cnf`)
log.level                   | Logging verbosity. (default: info)
exporter.lock_wait_timeout  | Set a lock_wait_timeout (in seconds) on the connection to avoid long metadata locking. (default: 2)
exporter.log_slow_filter    | Add a log_slow_filter to avoid slow query logging of scrapes. NOTE: Not supported by Oracle MySQL.
tls.insecure-skip-verify    | Ignore tls verification errors.
web.config.file             | Path to a [web configuration file](#tls-and-basic-authentication).
web.listen-address          | Address to listen on for web interface and telemetry.
web.telemetry-path          | Path under which to expose metrics.
version                     | Print the version information.

### Environment Variables

Name                        | Description
----------------------------|--------------------------------------------------------------------------------------------------
MYSQLD_EXPORTER_PASSWORD    | Password to be used for connecting to MySQL Server.
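As an end-to-end sketch combining the flags and environment variable above (all values here are placeholders; adjust the address, user, and listen port for your environment):

```bash
# Assumes the 'exporter' user from the Required Grants section exists.
export MYSQLD_EXPORTER_PASSWORD=secret
./mysqld_exporter \
  --mysqld.address=localhost:3306 \
  --mysqld.username=exporter \
  --collect.binlog_size \
  --no-collect.info_schema.tables \
  --web.listen-address=:9104
```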
### Configuration precedence

If you have configured the cli with both `mysqld` flags and a valid configuration file, the options in the configuration file will override the flags for the `client` section.

## TLS and basic authentication

The MySQLd Exporter supports TLS and basic authentication.

To use TLS and/or basic authentication, you need to pass a configuration file using the `--web.config.file` parameter. The format of the file is described [in the exporter-toolkit repository](https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md).

## Customizing Configuration for an SSL Connection

If the MySQL server supports SSL, you may need to specify a CA truststore to verify the server's chain-of-trust. You may also need to specify an SSL keypair for the client side of the SSL connection. To configure the mysqld exporter to use a custom CA certificate, add the following to the mysql cnf file:

```
ssl-ca=/path/to/ca/file
```

To specify the client SSL keypair, add the following to the cnf:

```
ssl-key=/path/to/ssl/client/key
ssl-cert=/path/to/ssl/client/cert
```

## Using Docker

You can deploy this exporter using the [prom/mysqld-exporter](https://registry.hub.docker.com/r/prom/mysqld-exporter/) Docker image.

For example:

```bash
docker network create my-mysql-network
docker pull prom/mysqld-exporter

docker run -d \
  -p 9104:9104 \
  --network my-mysql-network \
  prom/mysqld-exporter --config.my-cnf=<path_to_cnf>
```

## heartbeat

With `collect.heartbeat` enabled, mysqld_exporter will scrape replication delay measured by heartbeat mechanisms. [Pt-heartbeat][pth] is the reference heartbeat implementation supported.

[pth]: https://www.percona.com/doc/percona-toolkit/2.2/pt-heartbeat.html

## Filtering enabled collectors

The `mysqld_exporter` will expose all metrics from enabled collectors by default. This is the recommended way to collect metrics to avoid errors when comparing metrics of different families.

For advanced use the `mysqld_exporter` can be passed an optional list of collectors to filter metrics. The `collect[]` parameter may be used multiple times. In Prometheus configuration you can use this syntax under the [scrape config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config).

```yaml
params:
  collect[]:
    - foo
    - bar
```

This can be useful for having different Prometheus servers collect specific metrics from targets.
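For ad-hoc checks, the same filtering works as URL parameters on the metrics endpoint. A sketch, where the collector names `global_status` and `binlog_size` come from the Collector Flags table above and the host and port are placeholders:

```bash
curl 'http://localhost:9104/metrics?collect[]=global_status&collect[]=binlog_size'
```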
## Example Rules

There is a set of sample rules, alerts and dashboards available in the [mysqld-mixin](mysqld-mixin/).

[circleci]: https://circleci.com/gh/prometheus/mysqld_exporter
[hub]: https://hub.docker.com/r/prom/mysqld-exporter/
[travis]: https://travis-ci.org/prometheus/mysqld_exporter
[quay]: https://quay.io/repository/prometheus/mysqld-exporter
[parsetime]: https://github.com/go-sql-driver/mysql#parsetime

mysqld_exporter-0.15.0/SECURITY.md

# Reporting a security issue

The Prometheus security policy, including how to report vulnerabilities, can be found here: <https://prometheus.io/docs/operating/security/>

mysqld_exporter-0.15.0/VERSION

0.15.0

mysqld_exporter-0.15.0/collector/binlog.go

// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Scrape `SHOW BINARY LOGS`

package collector

import (
	"context"
	"database/sql"
	"fmt"
	"strconv"
	"strings"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
)

const (
	// Subsystem.
	binlog = "binlog"
	// Queries.
	logbinQuery = `SELECT @@log_bin`
	binlogQuery = `SHOW BINARY LOGS`
)

// Metric descriptors.
var (
	binlogSizeDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, binlog, "size_bytes"),
		"Combined size of all registered binlog files.",
		[]string{}, nil,
	)
	binlogFilesDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, binlog, "files"),
		"Number of registered binlog files.",
		[]string{}, nil,
	)
	binlogFileNumberDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, binlog, "file_number"),
		"The last binlog file number.",
		[]string{}, nil,
	)
)

// ScrapeBinlogSize collects from `SHOW BINARY LOGS`.
type ScrapeBinlogSize struct{}

// Name of the Scraper. Should be unique.
func (ScrapeBinlogSize) Name() string {
	return "binlog_size"
}

// Help describes the role of the Scraper.
func (ScrapeBinlogSize) Help() string {
	return "Collect the current size of all registered binlog files"
}

// Version of MySQL from which scraper is available.
func (ScrapeBinlogSize) Version() float64 {
	return 5.1
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeBinlogSize) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
	var logBin uint8
	err := db.QueryRowContext(ctx, logbinQuery).Scan(&logBin)
	if err != nil {
		return err
	}
	// If log_bin is OFF, do not run SHOW BINARY LOGS which explicitly produces MySQL error
	if logBin == 0 {
		return nil
	}

	masterLogRows, err := db.QueryContext(ctx, binlogQuery)
	if err != nil {
		return err
	}
	defer masterLogRows.Close()

	var (
		size      uint64
		count     uint64
		filename  string
		filesize  uint64
		encrypted string
	)
	size = 0
	count = 0

	columns, err := masterLogRows.Columns()
	if err != nil {
		return err
	}
	columnCount := len(columns)

	for masterLogRows.Next() {
		switch columnCount {
		case 2:
			// Propagate scan errors instead of silently returning success.
			if err := masterLogRows.Scan(&filename, &filesize); err != nil {
				return err
			}
		case 3:
			if err := masterLogRows.Scan(&filename, &filesize, &encrypted); err != nil {
				return err
			}
		default:
			return fmt.Errorf("invalid number of columns: %q", columnCount)
		}

		size += filesize
		count++
	}

	ch <- prometheus.MustNewConstMetric(
		binlogSizeDesc, prometheus.GaugeValue, float64(size),
	)
	ch <- prometheus.MustNewConstMetric(
		binlogFilesDesc, prometheus.GaugeValue, float64(count),
	)
	// The last row contains the last binlog file number. Guard against an
	// empty result set, where filename has no dot-separated numeric suffix
	// and indexing [1] would panic.
	if fields := strings.Split(filename, "."); len(fields) > 1 {
		value, _ := strconv.ParseFloat(fields[1], 64)
		ch <- prometheus.MustNewConstMetric(
			binlogFileNumberDesc, prometheus.GaugeValue, value,
		)
	}
	return nil
}

// check interface
var _ Scraper = ScrapeBinlogSize{}
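As a usage sketch (not part of the repository), the three gauges emitted above can be queried in Prometheus; the names follow from the descriptors (`namespace` is `mysql`, the subsystem is `binlog`):

```
# Current combined size of all binlogs, and its change over the last hour.
mysql_binlog_size_bytes
delta(mysql_binlog_size_bytes[1h])

# Number of registered binlog files, and the latest file number.
mysql_binlog_files
mysql_binlog_file_number
```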
AddRow("centos6-bin.000444", "573009") mock.ExpectQuery(sanitizeQuery(binlogQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeBinlogSize{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{}, value: 574942, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 3, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 444, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } mysqld_exporter-0.15.0/collector/collector.go000066400000000000000000000045661444546573200213720ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "bytes" "database/sql" "regexp" "strconv" "strings" "time" "github.com/prometheus/client_golang/prometheus" ) const ( // Exporter namespace. namespace = "mysql" // Math constant for picoseconds to seconds. picoSeconds = 1e12 // Query to check whether user/table/client stats are enabled. userstatCheckQuery = `SHOW GLOBAL VARIABLES WHERE Variable_Name='userstat' OR Variable_Name='userstat_running'` ) var logRE = regexp.MustCompile(`.+\.(\d+)$`) func newDesc(subsystem, name, help string) *prometheus.Desc { return prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, name), help, nil, nil, ) } func parseStatus(data sql.RawBytes) (float64, bool) { dataString := strings.ToLower(string(data)) switch dataString { case "yes", "on": return 1, true case "no", "off", "disabled": return 0, true // SHOW SLAVE STATUS Slave_IO_Running can return "Connecting" which is a non-running state. 
case "connecting": return 0, true // SHOW GLOBAL STATUS like 'wsrep_cluster_status' can return "Primary" or "non-Primary"/"Disconnected" case "primary": return 1, true case "non-primary", "disconnected": return 0, true } if ts, err := time.Parse("Jan 02 15:04:05 2006 MST", string(data)); err == nil { return float64(ts.Unix()), true } if ts, err := time.Parse("2006-01-02 15:04:05", string(data)); err == nil { return float64(ts.Unix()), true } if logNum := logRE.Find(data); logNum != nil { value, err := strconv.ParseFloat(string(logNum), 64) return value, err == nil } value, err := strconv.ParseFloat(string(data), 64) return value, err == nil } func parsePrivilege(data sql.RawBytes) (float64, bool) { if bytes.Equal(data, []byte("Y")) { return 1, true } if bytes.Equal(data, []byte("N")) { return 0, true } return -1, false } mysqld_exporter-0.15.0/collector/collector_test.go000066400000000000000000000032561444546573200224240ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "strings" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" ) type labelMap map[string]string type MetricResult struct { labels labelMap value float64 metricType dto.MetricType } func readMetric(m prometheus.Metric) MetricResult { pb := &dto.Metric{} m.Write(pb) labels := make(labelMap, len(pb.Label)) for _, v := range pb.Label { labels[v.GetName()] = v.GetValue() } if pb.Gauge != nil { return MetricResult{labels: labels, value: pb.GetGauge().GetValue(), metricType: dto.MetricType_GAUGE} } if pb.Counter != nil { return MetricResult{labels: labels, value: pb.GetCounter().GetValue(), metricType: dto.MetricType_COUNTER} } if pb.Untyped != nil { return MetricResult{labels: labels, value: pb.GetUntyped().GetValue(), metricType: dto.MetricType_UNTYPED} } panic("Unsupported metric type") } func sanitizeQuery(q string) string { q = strings.Join(strings.Fields(q), " ") q = strings.Replace(q, "(", "\\(", -1) q = strings.Replace(q, ")", "\\)", -1) q = strings.Replace(q, "*", "\\*", -1) return q } mysqld_exporter-0.15.0/collector/engine_innodb.go000066400000000000000000000060531444546573200221730ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `SHOW ENGINE INNODB STATUS`. 
package collector import ( "context" "database/sql" "regexp" "strconv" "strings" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const ( // Subsystem. innodb = "engine_innodb" // Query. engineInnodbStatusQuery = `SHOW ENGINE INNODB STATUS` ) // ScrapeEngineInnodbStatus scrapes from `SHOW ENGINE INNODB STATUS`. type ScrapeEngineInnodbStatus struct{} // Name of the Scraper. Should be unique. func (ScrapeEngineInnodbStatus) Name() string { return "engine_innodb_status" } // Help describes the role of the Scraper. func (ScrapeEngineInnodbStatus) Help() string { return "Collect from SHOW ENGINE INNODB STATUS" } // Version of MySQL from which scraper is available. func (ScrapeEngineInnodbStatus) Version() float64 { return 5.1 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeEngineInnodbStatus) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { rows, err := db.QueryContext(ctx, engineInnodbStatusQuery) if err != nil { return err } defer rows.Close() var typeCol, nameCol, statusCol string // The first row should contain the necessary info; if more than one row is returned, that is an unexpected case. if rows.Next() { if err := rows.Scan(&typeCol, &nameCol, &statusCol); err != nil { return err } } // 0 queries inside InnoDB, 0 queries in queue // 0 read views open inside InnoDB rQueries := regexp.MustCompile(`(\d+) queries inside InnoDB, (\d+) queries in queue`) rViews := regexp.MustCompile(`(\d+) read views open inside InnoDB`) for _, line := range strings.Split(statusCol, "\n") { if data := rQueries.FindStringSubmatch(line); data != nil { value, _ := strconv.ParseFloat(data[1], 64) ch <- prometheus.MustNewConstMetric( newDesc(innodb, "queries_inside_innodb", "Queries inside InnoDB."), prometheus.GaugeValue, value, ) value, _ = strconv.ParseFloat(data[2], 64) ch <- prometheus.MustNewConstMetric( newDesc(innodb, "queries_in_queue", "Queries in queue."), prometheus.GaugeValue, value, ) } else if data := rViews.FindStringSubmatch(line); data != nil { value, _ := strconv.ParseFloat(data[1], 64) ch <- prometheus.MustNewConstMetric( newDesc(innodb, "read_views_open_inside_innodb", "Read views open inside InnoDB."), prometheus.GaugeValue, value, ) } } return nil } // check interface var _ Scraper = ScrapeEngineInnodbStatus{} mysqld_exporter-0.15.0/collector/engine_innodb_test.go000066400000000000000000000145401444546573200232320ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License.
package collector import ( "context" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapeEngineInnodbStatus(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() sample := ` ===================================== 2016-09-14 19:04:38 0x7fed21462700 INNODB MONITOR OUTPUT ===================================== Per second averages calculated from the last 30 seconds ----------------- BACKGROUND THREAD ----------------- srv_master_thread loops: 1 srv_active, 0 srv_shutdown, 49166 srv_idle srv_master_thread log flush and writes: 49165 ---------- SEMAPHORES ---------- OS WAIT ARRAY INFO: reservation count 15 OS WAIT ARRAY INFO: signal count 12 RW-shared spins 0, rounds 4, OS waits 2 RW-excl spins 0, rounds 0, OS waits 0 RW-sx spins 0, rounds 0, OS waits 0 Spin rounds per wait: 4.00 RW-shared, 0.00 RW-excl, 0.00 RW-sx ------------ TRANSACTIONS ------------ Trx id counter 67843 Purge done for trx's n:o < 55764 undo n:o < 0 state: running but idle History list length 779 LIST OF TRANSACTIONS FOR EACH SESSION: ---TRANSACTION 422131596298608, not started 0 lock struct(s), heap size 1136, 0 row lock(s) -------- FILE I/O -------- I/O thread 0 state: waiting for completed aio requests (insert buffer thread) I/O thread 1 state: waiting for completed aio requests (log thread) I/O thread 2 state: waiting for completed aio requests (read thread) I/O thread 3 state: waiting for completed aio requests (read thread) I/O thread 4 state: waiting for completed aio requests (read thread) I/O thread 5 state: waiting for completed aio requests (read thread) I/O thread 6 state: waiting for completed aio requests (write thread) I/O thread 7 state: waiting for completed aio requests (write thread) I/O thread 8 state: waiting for completed aio requests (write thread) I/O thread 9 state: waiting for completed aio requests (write thread) Pending normal aio reads: [0, 0, 0, 0] , aio writes: [0, 0, 0, 0] , ibuf aio reads:, log i/o's:, sync i/o's: Pending flushes (fsync) log: 0; buffer pool: 0 512 OS file reads, 57 OS file writes, 8 OS fsyncs 0.00 reads/s, 0 avg bytes/read, 0.00 writes/s, 0.00 fsyncs/s ------------------------------------- INSERT BUFFER AND ADAPTIVE HASH INDEX ------------------------------------- Ibuf: size 1, free list len 0, seg size 2, 0 merges merged operations: insert 0, delete mark 0, delete 0 discarded operations: insert 0, delete mark 0, delete 0 Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) Hash table size 34673, node heap has 0 buffer(s) 0.00 hash searches/s, 0.00 non-hash searches/s --- LOG --- Log sequence number 37771171 Log flushed up to 37771171 Pages flushed up to 37771171 Last checkpoint at 37771162 Max checkpoint age 80826164 Checkpoint age target 78300347 Modified age 0 Checkpoint age 9 0 pending log flushes, 0 pending chkp writes 10 log i/o's done, 0.00 log i/o's/second ---------------------- BUFFER POOL AND MEMORY ---------------------- Total large memory allocated 139722752 Dictionary memory allocated 367821 Internal hash tables (constant 
factor + variable factor) Adaptive hash index 2252736 (2219072 + 33664) Page hash 139112 (buffer pool 0 only) Dictionary cache 922589 (554768 + 367821) File system 839328 (812272 + 27056) Lock system 334008 (332872 + 1136) Recovery system 0 (0 + 0) Buffer pool size 8191 Buffer pool size, bytes 0 Free buffers 7684 Database pages 507 Old database pages 0 Modified db pages 0 Pending reads 0 Pending writes: LRU 0, flush list 0, single page 0 Pages made young 0, not young 0 0.00 youngs/s, 0.00 non-youngs/s Pages read 473, created 34, written 36 0.00 reads/s, 0.00 creates/s, 0.00 writes/s No buffer pool page gets since the last printout Pages read ahead 0.00/s, evicted without access 0.00/s, Random read ahead 0.00/s LRU len: 507, unzip_LRU len: 0 I/O sum[0]:cur[0], unzip sum[0]:cur[0] -------------- ROW OPERATIONS -------------- 661 queries inside InnoDB, 10 queries in queue 15 read views open inside InnoDB 0 RW transactions active inside InnoDB Process ID=1, Main thread ID=140656308950784, state: sleeping Number of rows inserted 0, updated 0, deleted 0, read 12 0.00 inserts/s, 0.00 updates/s, 0.00 deletes/s, 0.00 reads/s ---------------------------- END OF INNODB MONITOR OUTPUT ============================ ` columns := []string{"Type", "Name", "Status"} rows := sqlmock.NewRows(columns).AddRow("InnoDB", "", sample) mock.ExpectQuery(sanitizeQuery(engineInnodbStatusQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeEngineInnodbStatus{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() metricsExpected := []MetricResult{ {labels: labelMap{}, value: 661, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 10, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 15, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range metricsExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } mysqld_exporter-0.15.0/collector/engine_tokudb.go000066400000000000000000000047771444546573200222140ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `SHOW ENGINE TOKUDB STATUS`. package collector import ( "context" "database/sql" "strings" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const ( // Subsystem. tokudb = "engine_tokudb" // Query. engineTokudbStatusQuery = `SHOW ENGINE TOKUDB STATUS` ) // ScrapeEngineTokudbStatus scrapes from `SHOW ENGINE TOKUDB STATUS`. type ScrapeEngineTokudbStatus struct{} // Name of the Scraper. Should be unique. func (ScrapeEngineTokudbStatus) Name() string { return "engine_tokudb_status" } // Help describes the role of the Scraper.
func (ScrapeEngineTokudbStatus) Help() string { return "Collect from SHOW ENGINE TOKUDB STATUS" } // Version of MySQL from which scraper is available. func (ScrapeEngineTokudbStatus) Version() float64 { return 5.6 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeEngineTokudbStatus) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { tokudbRows, err := db.QueryContext(ctx, engineTokudbStatusQuery) if err != nil { return err } defer tokudbRows.Close() var temp, key string var val sql.RawBytes for tokudbRows.Next() { if err := tokudbRows.Scan(&temp, &key, &val); err != nil { return err } key = strings.ToLower(key) if floatVal, ok := parseStatus(val); ok { ch <- prometheus.MustNewConstMetric( newDesc(tokudb, sanitizeTokudbMetric(key), "Generic metric from SHOW ENGINE TOKUDB STATUS."), prometheus.UntypedValue, floatVal, ) } } return nil } func sanitizeTokudbMetric(metricName string) string { replacements := map[string]string{ ">": "", ",": "", ":": "", "(": "", ")": "", " ": "_", "-": "_", "+": "and", "/": "and", } for r := range replacements { metricName = strings.Replace(metricName, r, replacements[r], -1) } return metricName } // check interface var _ Scraper = ScrapeEngineTokudbStatus{} mysqld_exporter-0.15.0/collector/engine_tokudb_test.go000066400000000000000000000063631444546573200232550ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestSanitizeTokudbMetric(t *testing.T) { samples := map[string]string{ "loader: number of calls to loader->close() that failed": "loader_number_of_calls_to_loader_close_that_failed", "ft: promotion: stopped anyway, after locking the child": "ft_promotion_stopped_anyway_after_locking_the_child", "ft: basement nodes deserialized with fixed-keysize": "ft_basement_nodes_deserialized_with_fixed_keysize", "memory: number of bytes used (requested + overhead)": "memory_number_of_bytes_used_requested_and_overhead", "ft: uncompressed / compressed bytes written (overall)": "ft_uncompressed_and_compressed_bytes_written_overall", } convey.Convey("Replacement tests", t, func() { for metric := range samples { got := sanitizeTokudbMetric(metric) convey.So(got, convey.ShouldEqual, samples[metric]) } }) } func TestScrapeEngineTokudbStatus(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"Type", "Name", "Status"} rows := sqlmock.NewRows(columns). AddRow("TokuDB", "indexer: number of calls to indexer->build() succeeded", "1"). AddRow("TokuDB", "ft: promotion: stopped anyway, after locking the child", "45316247"). 
AddRow("TokuDB", "memory: mallocator version", "3.3.1-0-g9ef9d9e8c271cdf14f664b871a8f98c827714784"). AddRow("TokuDB", "filesystem: most recent disk full", "Thu Jan 1 00:00:00 1970"). AddRow("TokuDB", "locktree: time spent ending the STO early (seconds)", "9115.904484") mock.ExpectQuery(sanitizeQuery(engineTokudbStatusQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeEngineTokudbStatus{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() metricsExpected := []MetricResult{ {labels: labelMap{}, value: 1, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{}, value: 45316247, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{}, value: 9115.904484, metricType: dto.MetricType_UNTYPED}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range metricsExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } mysqld_exporter-0.15.0/collector/exporter.go000066400000000000000000000137051444546573200212470ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "database/sql" "fmt" "regexp" "strconv" "strings" "sync" "time" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/go-sql-driver/mysql" "github.com/prometheus/client_golang/prometheus" ) // Metric name parts. const ( // Subsystem(s). exporter = "exporter" ) // SQL queries and parameters. const ( versionQuery = `SELECT @@version` // System variable params formatting. // See: https://github.com/go-sql-driver/mysql#system-variables sessionSettingsParam = `log_slow_filter=%27tmp_table_on_disk,filesort_on_disk%27` timeoutParam = `lock_wait_timeout=%d` ) var ( versionRE = regexp.MustCompile(`^\d+\.\d+`) ) // Tunable flags. var ( exporterLockTimeout = kingpin.Flag( "exporter.lock_wait_timeout", "Set a lock_wait_timeout (in seconds) on the connection to avoid long metadata locking.", ).Default("2").Int() slowLogFilter = kingpin.Flag( "exporter.log_slow_filter", "Add a log_slow_filter to avoid slow query logging of scrapes. 
NOTE: Not supported by Oracle MySQL.", ).Default("false").Bool() ) // Metric definitions. var ( mysqlUp = prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "up"), "Whether the MySQL server is up.", nil, nil, ) mysqlScrapeCollectorSuccess = prometheus.NewDesc( prometheus.BuildFQName(namespace, exporter, "collector_success"), "mysqld_exporter: Whether a collector succeeded.", []string{"collector"}, nil, ) mysqlScrapeDurationSeconds = prometheus.NewDesc( prometheus.BuildFQName(namespace, exporter, "collector_duration_seconds"), "Collector time duration.", []string{"collector"}, nil, ) ) // Verify if Exporter implements prometheus.Collector var _ prometheus.Collector = (*Exporter)(nil) // Exporter collects MySQL metrics. It implements prometheus.Collector. type Exporter struct { ctx context.Context logger log.Logger dsn string scrapers []Scraper } // New returns a new MySQL exporter for the provided DSN. func New(ctx context.Context, dsn string, scrapers []Scraper, logger log.Logger) *Exporter { // Set up extra params for the DSN; default to having a lock timeout. dsnParams := []string{fmt.Sprintf(timeoutParam, *exporterLockTimeout)} if *slowLogFilter { dsnParams = append(dsnParams, sessionSettingsParam) } if strings.Contains(dsn, "?") { dsn = dsn + "&" } else { dsn = dsn + "?" } dsn += strings.Join(dsnParams, "&") return &Exporter{ ctx: ctx, logger: logger, dsn: dsn, scrapers: scrapers, } } // Describe implements prometheus.Collector. func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { ch <- mysqlUp ch <- mysqlScrapeDurationSeconds ch <- mysqlScrapeCollectorSuccess } // Collect implements prometheus.Collector. func (e *Exporter) Collect(ch chan<- prometheus.Metric) { up := e.scrape(e.ctx, ch) ch <- prometheus.MustNewConstMetric(mysqlUp, prometheus.GaugeValue, up) } // scrape collects metrics from the target and returns an up metric value. func (e *Exporter) scrape(ctx context.Context, ch chan<- prometheus.Metric) float64 { var err error scrapeTime := time.Now() db, err := sql.Open("mysql", e.dsn) if err != nil { level.Error(e.logger).Log("msg", "Error opening connection to database", "err", err) return 0.0 } defer db.Close() // By design, the exporter should use at most one connection per request. db.SetMaxOpenConns(1) db.SetMaxIdleConns(1) // Set max lifetime for a connection. db.SetConnMaxLifetime(1 * time.Minute) if err := db.PingContext(ctx); err != nil { level.Error(e.logger).Log("msg", "Error pinging mysqld", "err", err) return 0.0 } ch <- prometheus.MustNewConstMetric(mysqlScrapeDurationSeconds, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "connection") version := getMySQLVersion(db, e.logger) var wg sync.WaitGroup defer wg.Wait() for _, scraper := range e.scrapers { if version < scraper.Version() { continue } wg.Add(1) go func(scraper Scraper) { defer wg.Done() label := "collect." + scraper.Name() scrapeTime := time.Now() collectorSuccess := 1.0 if err := scraper.Scrape(ctx, db, ch, log.With(e.logger, "scraper", scraper.Name())); err != nil { level.Error(e.logger).Log("msg", "Error from scraper", "scraper", scraper.Name(), "target", e.getTargetFromDsn(), "err", err) collectorSuccess = 0.0 } ch <- prometheus.MustNewConstMetric(mysqlScrapeCollectorSuccess, prometheus.GaugeValue, collectorSuccess, label) ch <- prometheus.MustNewConstMetric(mysqlScrapeDurationSeconds, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), label) }(scraper) } return 1.0 } func (e *Exporter) getTargetFromDsn() string { // Get target from DSN.
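// mysql.ParseDSN accepts DSNs of the go-sql-driver form "user:password@tcp(host:port)/dbname"; only the network address (the Addr field, e.g. "host:port") is used here, to identify the scrape target in error logs.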
dsnConfig, err := mysql.ParseDSN(e.dsn) if err != nil { level.Error(e.logger).Log("msg", "Error parsing DSN", "err", err) return "" } return dsnConfig.Addr } func getMySQLVersion(db *sql.DB, logger log.Logger) float64 { var versionStr string var versionNum float64 if err := db.QueryRow(versionQuery).Scan(&versionStr); err == nil { versionNum, _ = strconv.ParseFloat(versionRE.FindString(versionStr), 64) } else { level.Debug(logger).Log("msg", "Error querying version", "err", err) } // If we can't match/parse the version, set it to some big value that matches all versions. if versionNum == 0 { level.Debug(logger).Log("msg", "Error parsing version string", "version", versionStr) versionNum = 999 } return versionNum } mysqld_exporter-0.15.0/collector/exporter_test.go000066400000000000000000000037461444546573200223100ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "database/sql" "os" "testing" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/smartystreets/goconvey/convey" ) const dsn = "root@/mysql" func TestExporter(t *testing.T) { if testing.Short() { t.Skip("-short is passed, skipping test") } exporter := New( context.Background(), dsn, []Scraper{ ScrapeGlobalStatus{}, }, log.NewNopLogger(), ) convey.Convey("Metrics describing", t, func() { ch := make(chan *prometheus.Desc) go func() { exporter.Describe(ch) close(ch) }() for range ch { } }) convey.Convey("Metrics collection", t, func() { ch := make(chan prometheus.Metric) go func() { exporter.Collect(ch) close(ch) }() for m := range ch { got := readMetric(m) if got.labels[model.MetricNameLabel] == "mysql_up" { convey.So(got.value, convey.ShouldEqual, 1) } } }) } func TestGetMySQLVersion(t *testing.T) { if testing.Short() { t.Skip("-short is passed, skipping test") } logger := log.NewLogfmtLogger(os.Stderr) logger = level.NewFilter(logger, level.AllowDebug()) convey.Convey("Version parsing", t, func() { db, err := sql.Open("mysql", dsn) convey.So(err, convey.ShouldBeNil) defer db.Close() convey.So(getMySQLVersion(db, logger), convey.ShouldBeBetweenOrEqual, 5.6, 11.0) }) } mysqld_exporter-0.15.0/collector/global_status.go000066400000000000000000000166631444546573200222470ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `SHOW GLOBAL STATUS`.
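// Rows are (Variable_name, Value) pairs such as ("Com_select", "3") or ("wsrep_cluster_status", "Primary"), as in the fixture in global_status_test.go. Values are converted with parseStatus, and a few well-known key prefixes are split into labeled metrics in the Scrape method below.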
package collector import ( "context" "database/sql" "regexp" "strconv" "strings" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const ( // Scrape query. globalStatusQuery = `SHOW GLOBAL STATUS` // Subsystem. globalStatus = "global_status" ) // Regexp to match various groups of status vars. var globalStatusRE = regexp.MustCompile(`^(com|handler|connection_errors|innodb_buffer_pool_pages|innodb_rows|performance_schema)_(.*)$`) // Metric descriptors. var ( globalCommandsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "commands_total"), "Total number of executed MySQL commands.", []string{"command"}, nil, ) globalHandlerDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "handlers_total"), "Total number of executed MySQL handlers.", []string{"handler"}, nil, ) globalConnectionErrorsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "connection_errors_total"), "Total number of MySQL connection errors.", []string{"error"}, nil, ) globalBufferPoolPagesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "buffer_pool_pages"), "Innodb buffer pool pages by state.", []string{"state"}, nil, ) globalBufferPoolDirtyPagesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "buffer_pool_dirty_pages"), "Innodb buffer pool dirty pages.", []string{}, nil, ) globalBufferPoolPageChangesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "buffer_pool_page_changes_total"), "Innodb buffer pool page state changes.", []string{"operation"}, nil, ) globalInnoDBRowOpsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "innodb_row_ops_total"), "Total number of MySQL InnoDB row operations.", []string{"operation"}, nil, ) globalPerformanceSchemaLostDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, globalStatus, "performance_schema_lost_total"), "Total number of MySQL instrumentations that could not be loaded or created due to memory constraints.", []string{"instrumentation"}, nil, ) ) // ScrapeGlobalStatus collects from `SHOW GLOBAL STATUS`. type ScrapeGlobalStatus struct{} // Name of the Scraper. Should be unique. func (ScrapeGlobalStatus) Name() string { return globalStatus } // Help describes the role of the Scraper. func (ScrapeGlobalStatus) Help() string { return "Collect from SHOW GLOBAL STATUS" } // Version of MySQL from which scraper is available. func (ScrapeGlobalStatus) Version() float64 { return 5.1 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeGlobalStatus) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { globalStatusRows, err := db.QueryContext(ctx, globalStatusQuery) if err != nil { return err } defer globalStatusRows.Close() var key string var val sql.RawBytes var textItems = map[string]string{ "wsrep_local_state_uuid": "", "wsrep_cluster_state_uuid": "", "wsrep_provider_version": "", "wsrep_evs_repl_latency": "", } for globalStatusRows.Next() { if err := globalStatusRows.Scan(&key, &val); err != nil { return err } if floatVal, ok := parseStatus(val); ok { // Unparsable values are silently skipped. 
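// Keys matching globalStatusRE are exported as labeled metrics, e.g. "Com_select" becomes mysql_global_status_commands_total{command="select"}; everything else becomes a generic untyped metric.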
key = validPrometheusName(key) match := globalStatusRE.FindStringSubmatch(key) if match == nil { ch <- prometheus.MustNewConstMetric( newDesc(globalStatus, key, "Generic metric from SHOW GLOBAL STATUS."), prometheus.UntypedValue, floatVal, ) continue } switch match[1] { case "com": ch <- prometheus.MustNewConstMetric( globalCommandsDesc, prometheus.CounterValue, floatVal, match[2], ) case "handler": ch <- prometheus.MustNewConstMetric( globalHandlerDesc, prometheus.CounterValue, floatVal, match[2], ) case "connection_errors": ch <- prometheus.MustNewConstMetric( globalConnectionErrorsDesc, prometheus.CounterValue, floatVal, match[2], ) case "innodb_buffer_pool_pages": switch match[2] { case "data", "free", "misc", "old": ch <- prometheus.MustNewConstMetric( globalBufferPoolPagesDesc, prometheus.GaugeValue, floatVal, match[2], ) case "dirty": ch <- prometheus.MustNewConstMetric( globalBufferPoolDirtyPagesDesc, prometheus.GaugeValue, floatVal, ) case "total": continue default: ch <- prometheus.MustNewConstMetric( globalBufferPoolPageChangesDesc, prometheus.CounterValue, floatVal, match[2], ) } case "innodb_rows": ch <- prometheus.MustNewConstMetric( globalInnoDBRowOpsDesc, prometheus.CounterValue, floatVal, match[2], ) case "performance_schema": ch <- prometheus.MustNewConstMetric( globalPerformanceSchemaLostDesc, prometheus.CounterValue, floatVal, match[2], ) } } else if _, ok := textItems[key]; ok { textItems[key] = string(val) } } // mysql_galera_variables_info metric. if textItems["wsrep_local_state_uuid"] != "" { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc(prometheus.BuildFQName(namespace, "galera", "status_info"), "PXC/Galera status information.", []string{"wsrep_local_state_uuid", "wsrep_cluster_state_uuid", "wsrep_provider_version"}, nil), prometheus.GaugeValue, 1, textItems["wsrep_local_state_uuid"], textItems["wsrep_cluster_state_uuid"], textItems["wsrep_provider_version"], ) } // mysql_galera_evs_repl_latency if textItems["wsrep_evs_repl_latency"] != "" { type evsValue struct { name string value float64 index int help string } evsMap := []evsValue{ evsValue{name: "min_seconds", value: 0, index: 0, help: "PXC/Galera group communication latency. Min value."}, evsValue{name: "avg_seconds", value: 0, index: 1, help: "PXC/Galera group communication latency. Avg value."}, evsValue{name: "max_seconds", value: 0, index: 2, help: "PXC/Galera group communication latency. Max value."}, evsValue{name: "stdev", value: 0, index: 3, help: "PXC/Galera group communication latency. Standard Deviation."}, evsValue{name: "sample_size", value: 0, index: 4, help: "PXC/Galera group communication latency. 
Sample Size."}, } evsParsingSuccess := true values := strings.Split(textItems["wsrep_evs_repl_latency"], "/") if len(evsMap) == len(values) { for i, v := range evsMap { evsMap[i].value, err = strconv.ParseFloat(values[v.index], 64) if err != nil { evsParsingSuccess = false } } if evsParsingSuccess { for _, v := range evsMap { key := prometheus.BuildFQName(namespace, "galera_evs_repl_latency", v.name) desc := prometheus.NewDesc(key, v.help, []string{}, nil) ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v.value) } } } } return nil } // check interface var _ Scraper = ScrapeGlobalStatus{} mysqld_exporter-0.15.0/collector/global_status_test.go000066400000000000000000000125401444546573200232750ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapeGlobalStatus(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"Variable_name", "Value"} rows := sqlmock.NewRows(columns). AddRow("Com_alter_db", "1"). AddRow("Com_show_status", "2"). AddRow("Com_select", "3"). AddRow("Connection_errors_internal", "4"). AddRow("Handler_commit", "5"). AddRow("Innodb_buffer_pool_pages_data", "6"). AddRow("Innodb_buffer_pool_pages_flushed", "7"). AddRow("Innodb_buffer_pool_pages_dirty", "7"). AddRow("Innodb_buffer_pool_pages_free", "8"). AddRow("Innodb_buffer_pool_pages_misc", "9"). AddRow("Innodb_buffer_pool_pages_old", "10"). AddRow("Innodb_buffer_pool_pages_total", "11"). AddRow("Innodb_buffer_pool_pages_lru_flushed", "13"). AddRow("Innodb_buffer_pool_pages_made_not_young", "14"). AddRow("Innodb_buffer_pool_pages_made_young", "15"). AddRow("Innodb_rows_read", "8"). AddRow("Performance_schema_users_lost", "9"). AddRow("Slave_running", "OFF"). AddRow("Ssl_version", ""). AddRow("Uptime", "10"). AddRow("validate_password.dictionary_file_words_count", "11"). AddRow("wsrep_cluster_status", "Primary"). AddRow("wsrep_local_state_uuid", "6c06e583-686f-11e6-b9e3-8336ad58138c"). AddRow("wsrep_cluster_state_uuid", "6c06e583-686f-11e6-b9e3-8336ad58138c"). AddRow("wsrep_provider_version", "3.16(r5c765eb)"). 
AddRow("wsrep_evs_repl_latency", "0.000227664/0.00034135/0.000544298/6.03708e-05/212") mock.ExpectQuery(sanitizeQuery(globalStatusQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeGlobalStatus{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{"command": "alter_db"}, value: 1, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"command": "show_status"}, value: 2, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"command": "select"}, value: 3, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"error": "internal"}, value: 4, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"handler": "commit"}, value: 5, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"state": "data"}, value: 6, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"operation": "flushed"}, value: 7, metricType: dto.MetricType_COUNTER}, {labels: labelMap{}, value: 7, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"state": "free"}, value: 8, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"state": "misc"}, value: 9, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"state": "old"}, value: 10, metricType: dto.MetricType_GAUGE}, //{labels: labelMap{"state": "total_pages"}, value: 11, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"operation": "lru_flushed"}, value: 13, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"operation": "made_not_young"}, value: 14, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"operation": "made_young"}, value: 15, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"operation": "read"}, value: 8, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"instrumentation": "users_lost"}, value: 9, metricType: dto.MetricType_COUNTER}, {labels: labelMap{}, value: 0, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{}, value: 10, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{}, value: 11, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{}, value: 1, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{"wsrep_local_state_uuid": "6c06e583-686f-11e6-b9e3-8336ad58138c", "wsrep_cluster_state_uuid": "6c06e583-686f-11e6-b9e3-8336ad58138c", "wsrep_provider_version": "3.16(r5c765eb)"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 0.000227664, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 0.00034135, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 0.000544298, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 6.03708e-05, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 212, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } mysqld_exporter-0.15.0/collector/global_variables.go000066400000000000000000000434341444546573200226710ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `SHOW GLOBAL VARIABLES`. package collector import ( "context" "database/sql" "regexp" "strconv" "strings" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const ( // Metric subsystem globalVariables = "global_variables" // Metric SQL Queries. globalVariablesQuery = `SHOW GLOBAL VARIABLES` ) var ( // Map known global variables to help strings. Unknown will be mapped to generic gauges. globalVariablesHelp = map[string]string{ // https://github.com/facebook/mysql-5.6/wiki/New-MySQL-RocksDB-Server-Variables "rocksdb_access_hint_on_compaction_start": "File access pattern once a compaction is started, applied to all input files of a compaction.", "rocksdb_advise_random_on_open": "Hint of random access to the filesystem when a data file is opened.", "rocksdb_allow_concurrent_memtable_write": "Allow multi-writers to update memtables in parallel.", "rocksdb_allow_mmap_reads": "Allow the OS to mmap a data file for reads.", "rocksdb_allow_mmap_writes": "Allow the OS to mmap a data file for writes.", "rocksdb_block_cache_size": "Size of the LRU block cache in RocksDB. This memory is reserved for the block cache, which is in addition to any filesystem caching that may occur.", "rocksdb_block_restart_interval": "Number of keys for each set of delta encoded data.", "rocksdb_block_size_deviation": "If the percentage of free space in the current data block (size specified in rocksdb-block-size) is less than this amount, close the block (and write record to new block).", "rocksdb_block_size": "Size of the data block for reading sst files.", "rocksdb_bulk_load_size": "Sets the number of keys to accumulate before committing them to the storage engine during bulk loading.", "rocksdb_bulk_load": "When set, MyRocks will ignore checking keys for uniqueness or acquiring locks during transactions. This option should only be used when the application is certain there are no row conflicts, such as when setting up a new MyRocks instance from an existing MySQL dump.", "rocksdb_bytes_per_sync": "Enables the OS to sync out file writes as data files are created.", "rocksdb_cache_index_and_filter_blocks": "Requests RocksDB to use the block cache for caching the index and bloomfilter data blocks from each data file. If this is not set, RocksDB will allocate additional memory to maintain these data blocks.", "rocksdb_checksums_pct": "Sets the percentage of rows to calculate and set MyRocks checksums.", "rocksdb_collect_sst_properties": "Enables collecting statistics of each data file for improving optimizer behavior.", "rocksdb_commit_in_the_middle": "Commit rows implicitly every rocksdb-bulk-load-size, during bulk load/insert/update/deletes.", "rocksdb_compaction_readahead_size": "When non-zero, bigger reads are performed during compaction. 
Useful if running RocksDB on spinning disks, as compaction will do sequential instead of random reads.", "rocksdb_compaction_sequential_deletes_count_sd": "If enabled, factor in single deletes as part of rocksdb-compaction-sequential-deletes.", "rocksdb_compaction_sequential_deletes_file_size": "Threshold to trigger compaction if the number of sequential keys that are all delete markers exceeds this value. While this compaction helps reduce request latency by removing delete markers, it can increase write rates of RocksDB.", "rocksdb_compaction_sequential_deletes_window": "Threshold to trigger compaction if, within a sliding window of keys, there exists this parameter's number of delete markers.", "rocksdb_compaction_sequential_deletes": "Enables triggering of compaction when the number of delete markers in a data file exceeds a certain threshold. Depending on workload patterns, RocksDB can potentially maintain large numbers of delete markers and increase latency of all queries.", "rocksdb_create_if_missing": "Allows creating the RocksDB database if it does not exist.", "rocksdb_create_missing_column_families": "Allows creating new column families if they did not exist.", "rocksdb_db_write_buffer_size": "Size of the memtable used to store writes within RocksDB. This is the size per column family. Once this size is reached, a flush of the memtable to persistent media occurs.", "rocksdb_deadlock_detect": "Enables deadlock detection in RocksDB.", "rocksdb_debug_optimizer_no_zero_cardinality": "Test only to prevent MyRocks from calculating cardinality.", "rocksdb_delayed_write_rate": "When RocksDB hits the soft limits/thresholds for writes, such as soft_pending_compaction_bytes_limit being hit, or level0_slowdown_writes_trigger being hit, RocksDB will slow the write rate down to the value of this parameter as bytes/second.", "rocksdb_delete_obsolete_files_period_micros": "The periodicity of when obsolete files get deleted, but does not affect files removed through compaction.", "rocksdb_enable_bulk_load_api": "Enables using the SSTFileWriter feature in RocksDB, which bypasses the memtable, but this requires keys to be inserted into the table in either ascending or descending order. If disabled, bulk loading uses the normal write path via the memtable and does not require keys to be inserted in any order.", "rocksdb_enable_thread_tracking": "Set to allow RocksDB to track the status of threads accessing the database.", "rocksdb_enable_write_thread_adaptive_yield": "Set to allow RocksDB write batch group leader to wait up to the max time allowed before blocking on a mutex, allowing an increase in throughput for concurrent workloads.", "rocksdb_error_if_exists": "If set, reports an error if an existing database already exists.", "rocksdb_flush_log_at_trx_commit": "Sync'ing on transaction commit similar to innodb-flush-log-at-trx-commit: 0 - never sync, 1 - always sync, 2 - sync based on a timer controlled via rocksdb-background-sync.", "rocksdb_flush_memtable_on_analyze": "When analyze table is run, determines if the memtable should be flushed so that data in the memtable is also used for calculating stats.", "rocksdb_force_compute_memtable_stats": "When enabled, also include data in the memtables for index statistics calculations used by the query optimizer.
Greater accuracy, but requires more cpu.", "rocksdb_force_flush_memtable_now": "Triggers MyRocks to flush the memtables out to the data files.", "rocksdb_force_index_records_in_range": "When force index is used, a non-zero value here will be used as the number of rows to be returned to the query optimizer when trying to determine the estimated number of rows.", "rocksdb_hash_index_allow_collision": "Enables RocksDB to allow hashes to collide (uses less memory). Otherwise, the full prefix is stored to prevent hash collisions.", "rocksdb_keep_log_file_num": "Sets the maximum number of info LOG files to keep around.", "rocksdb_lock_scanned_rows": "If enabled, rows that are scanned during UPDATE remain locked even if they have not been updated.", "rocksdb_lock_wait_timeout": "Sets the number of seconds MyRocks will wait to acquire a row lock before aborting the request.", "rocksdb_log_file_time_to_roll": "Sets the number of seconds an info LOG file captures before rolling to a new LOG file.", "rocksdb_manifest_preallocation_size": "Sets the number of bytes to preallocate for the MANIFEST file in RocksDB and reduce possible random I/O on XFS. MANIFEST files are used to store information about column families, levels, active files, etc.", "rocksdb_max_open_files": "Sets a limit on the maximum number of file handles opened by RocksDB.", "rocksdb_max_row_locks": "Sets a limit on the maximum number of row locks held by a transaction before failing it.", "rocksdb_max_subcompactions": "For each compaction job, the maximum threads that will work on it simultaneously (i.e. subcompactions). A value of 1 means no subcompactions.", "rocksdb_max_total_wal_size": "Sets a limit on the maximum size of WAL files kept around. Once this limit is hit, RocksDB will force the flushing of memtables to reduce the size of WAL files.", "rocksdb_merge_buf_size": "Size (in bytes) of the merge buffers used to accumulate data during secondary key creation. During secondary key creation, we avoid updating the new indexes through the memtable and L0 by writing new entries directly to the lowest level in the database. This requires the values to be sorted so we use a merge/sort algorithm. This setting controls how large the merge buffers are. The default is 64Mb.", "rocksdb_merge_combine_read_size": "Size (in bytes) of the merge combine buffer used in the merge/sort algorithm as described in rocksdb-merge-buf-size.", "rocksdb_new_table_reader_for_compaction_inputs": "Indicates whether RocksDB should create a new file descriptor and table reader for each compaction input.
Doing so may use more memory but may allow pre-fetch options to be specified for compaction input files without impacting table readers used for user queries.", "rocksdb_no_block_cache": "Disables using the block cache for a column family.", "rocksdb_paranoid_checks": "Forces RocksDB to re-read a data file that was just created to verify correctness.", "rocksdb_pause_background_work": "Test only to start and stop all background compactions within RocksDB.", "rocksdb_perf_context_level": "Sets the level of information to capture via the perf context plugins.", "rocksdb_persistent_cache_size_mb": "The size (in Mb) to allocate to the RocksDB persistent cache if desired.", "rocksdb_pin_l0_filter_and_index_blocks_in_cache": "If rocksdb-cache-index-and-filter-blocks is true then this controls whether RocksDB 'pins' the filter and index blocks in the cache.", "rocksdb_print_snapshot_conflict_queries": "If this is true, MyRocks will log queries that generate snapshot conflicts into the .err log.", "rocksdb_rate_limiter_bytes_per_sec": "Controls the rate at which RocksDB is allowed to write to media via memtable flushes and compaction.", "rocksdb_records_in_range": "Test only to override the value returned by records-in-range.", "rocksdb_seconds_between_stat_computes": "Sets the number of seconds between recomputation of table statistics for the optimizer.", "rocksdb_signal_drop_index_thread": "Test only to signal the MyRocks drop index thread.", "rocksdb_skip_bloom_filter_on_read": "Indicates whether the bloom filters should be skipped on reads.", "rocksdb_skip_fill_cache": "Requests MyRocks to skip caching data on read requests.", "rocksdb_stats_dump_period_sec": "Sets the number of seconds to perform a RocksDB stats dump to the info LOG files.", "rocksdb_store_row_debug_checksums": "Include checksums when writing index/table records.", "rocksdb_strict_collation_check": "Enables MyRocks to check and verify table indexes have the proper collation settings.", "rocksdb_table_cache_numshardbits": "Sets the number of table caches within RocksDB.", "rocksdb_use_adaptive_mutex": "Enables adaptive mutexes in RocksDB which spin in user space before resorting to the kernel.", "rocksdb_use_direct_reads": "Enable direct IO when opening a file for read/write. This means that data will not be cached or buffered.", "rocksdb_use_fsync": "Requires RocksDB to use fsync instead of fdatasync when requesting a sync of a data file.", "rocksdb_validate_tables": "Requires MyRocks to verify all of MySQL's .frm files match tables stored in RocksDB.", "rocksdb_verify_row_debug_checksums": "Verify checksums when reading index/table records.", "rocksdb_wal_bytes_per_sync": "Controls the rate at which RocksDB writes out WAL file data.", "rocksdb_wal_recovery_mode": "Sets RocksDB's level of tolerance when recovering the WAL files after a system crash.", "rocksdb_wal_size_limit_mb": "Maximum size the RocksDB WAL is allowed to grow to. When this size is exceeded, RocksDB attempts to flush sufficient memtables to allow for the deletion of the oldest log.", "rocksdb_wal_ttl_seconds": "No WAL file older than this value should exist.", "rocksdb_whole_key_filtering": "Enables the bloomfilter to use the whole key for filtering instead of just the prefix. In order for this to be efficient, lookups should use the whole key for matching.", "rocksdb_write_disable_wal": "Disables logging data to the WAL files.
Useful for bulk loading.", "rocksdb_write_ignore_missing_column_families": "If 1, then writes to column families that do not exist are ignored by RocksDB.", } ) // ScrapeGlobalVariables collects from `SHOW GLOBAL VARIABLES`. type ScrapeGlobalVariables struct{} // Name of the Scraper. Should be unique. func (ScrapeGlobalVariables) Name() string { return globalVariables } // Help describes the role of the Scraper. func (ScrapeGlobalVariables) Help() string { return "Collect from SHOW GLOBAL VARIABLES" } // Version of MySQL from which scraper is available. func (ScrapeGlobalVariables) Version() float64 { return 5.1 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeGlobalVariables) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { globalVariablesRows, err := db.QueryContext(ctx, globalVariablesQuery) if err != nil { return err } defer globalVariablesRows.Close() var key string var val sql.RawBytes var textItems = map[string]string{ "innodb_version": "", "version": "", "version_comment": "", "wsrep_cluster_name": "", "wsrep_provider_options": "", "tx_isolation": "", "transaction_isolation": "", } for globalVariablesRows.Next() { if err = globalVariablesRows.Scan(&key, &val); err != nil { return err } key = validPrometheusName(key) if floatVal, ok := parseStatus(val); ok { help := globalVariablesHelp[key] if help == "" { help = "Generic gauge metric from SHOW GLOBAL VARIABLES." } ch <- prometheus.MustNewConstMetric( newDesc(globalVariables, key, help), prometheus.GaugeValue, floatVal, ) continue } if _, ok := textItems[key]; ok { textItems[key] = string(val) } } // mysql_version_info metric. ch <- prometheus.MustNewConstMetric( prometheus.NewDesc(prometheus.BuildFQName(namespace, "version", "info"), "MySQL version and distribution.", []string{"innodb_version", "version", "version_comment"}, nil), prometheus.GaugeValue, 1, textItems["innodb_version"], textItems["version"], textItems["version_comment"], ) // mysql_galera_variables_info metric. if textItems["wsrep_cluster_name"] != "" { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc(prometheus.BuildFQName(namespace, "galera", "variables_info"), "PXC/Galera variables information.", []string{"wsrep_cluster_name"}, nil), prometheus.GaugeValue, 1, textItems["wsrep_cluster_name"], ) } // mysql_galera_gcache_size_bytes metric. if textItems["wsrep_provider_options"] != "" { ch <- prometheus.MustNewConstMetric( newDesc("galera", "gcache_size_bytes", "PXC/Galera gcache size."), prometheus.GaugeValue, parseWsrepProviderOptions(textItems["wsrep_provider_options"]), ) } // mysql_transaction_isolation metric. if textItems["transaction_isolation"] != "" || textItems["tx_isolation"] != "" { level := textItems["transaction_isolation"] if level == "" { level = textItems["tx_isolation"] } ch <- prometheus.MustNewConstMetric( prometheus.NewDesc(prometheus.BuildFQName(namespace, "transaction", "isolation"), "MySQL transaction isolation.", []string{"level"}, nil), prometheus.GaugeValue, 1, level, ) } return nil } // parseWsrepProviderOptions parses wsrep_provider_options to get gcache.size in bytes.
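// For example, an options string containing "gcache.size = 128M;" parses to 134217728 (128*1024*1024), and a missing gcache.size yields 0 (see the tests in global_variables_test.go).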
func parseWsrepProviderOptions(opts string) float64 { var val float64 r, _ := regexp.Compile(`gcache.size = (\d+)([MG]?);`) data := r.FindStringSubmatch(opts) if data == nil { return 0 } val, _ = strconv.ParseFloat(data[1], 64) switch data[2] { case "M": val = val * 1024 * 1024 case "G": val = val * 1024 * 1024 * 1024 } return val } func validPrometheusName(s string) string { nameRe := regexp.MustCompile("([^a-zA-Z0-9_])") s = nameRe.ReplaceAllString(s, "_") s = strings.ToLower(s) return s } // check interface var _ Scraper = ScrapeGlobalVariables{} mysqld_exporter-0.15.0/collector/global_variables_test.go000066400000000000000000000217011444546573200237210ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapeGlobalVariables(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"Variable_name", "Value"} rows := sqlmock.NewRows(columns). AddRow("wait_timeout", "28800"). AddRow("version_compile_os", "Linux"). AddRow("userstat", "OFF"). AddRow("transaction_prealloc_size", "4096"). AddRow("tx_isolation", "REPEATABLE-READ"). AddRow("tmp_table_size", "16777216"). AddRow("tmpdir", "/tmp"). AddRow("sync_binlog", "0"). AddRow("sync_frm", "ON"). AddRow("slow_launch_time", "2"). AddRow("innodb_version", "5.6.30-76.3"). AddRow("version", "5.6.30-76.3-56"). AddRow("version_comment", "Percona XtraDB Cluster..."). AddRow("wsrep_cluster_name", "supercluster"). 
AddRow("wsrep_provider_options", "base_dir = /var/lib/mysql/; base_host = 10.91.142.82; base_port = 4567; cert.log_conflicts = no; debug = no; evs.auto_evict = 0; evs.causal_keepalive_period = PT1S; evs.debug_log_mask = 0x1; evs.delay_margin = PT1S; evs.delayed_keep_period = PT30S; evs.inactive_check_period = PT0.5S; evs.inactive_timeout = PT15S; evs.info_log_mask = 0; evs.install_timeout = PT7.5S; evs.join_retrans_period = PT1S; evs.keepalive_period = PT1S; evs.max_install_timeouts = 3; evs.send_window = 4; evs.stats_report_period = PT1M; evs.suspect_timeout = PT5S; evs.use_aggregate = true; evs.user_send_window = 2; evs.version = 0; evs.view_forget_timeout = P1D; gcache.dir = /var/lib/mysql/; gcache.keep_pages_count = 0; gcache.keep_pages_size = 0; gcache.mem_size = 0; gcache.name = /var/lib/mysql//galera.cache; gcache.page_size = 128M; gcache.size = 128M; gcomm.thread_prio = ; gcs.fc_debug = 0; gcs.fc_factor = 1.0; gcs.fc_limit = 16; gcs.fc_master_slave = no; gcs.max_packet_size = 64500; gcs.max_throttle = 0.25; gcs.recv_q_hard_limit = 9223372036854775807; gcs.recv_q_soft_limit = 0.25; gcs.sync_donor = no; gmcast.listen_addr = tcp://0.0.0.0:4567; gmcast.mcast_addr = ; gmcast.mcast_ttl = 1; gmcast.peer_timeout = PT3S; gmcast.segment = 0; gmcast.time_wait = PT5S; gmcast.version = 0; ist.recv_addr = 10.91.142.82; pc.announce_timeout = PT3S; pc.checksum = false; pc.ignore_quorum = false; pc.ignore_sb = false; pc.linger = PT20S; pc.npvo = false; pc.recovery = true; pc.version = 0; pc.wait_prim = true; pc.wait_prim_timeout = P30S; pc.weight = 1; protonet.backend = asio; protonet.version = 0; repl.causal_read_timeout = PT30S; repl.commit_order = 3; repl.key_format = FLAT8; repl.max_ws_size = 2147483647; repl.proto_max = 7; socket.checksum = 2; socket.recv_buf_size = 212992;") mock.ExpectQuery(globalVariablesQuery).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeGlobalVariables{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{}, value: 28800, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 4096, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 16777216, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 2, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"innodb_version": "5.6.30-76.3", "version": "5.6.30-76.3-56", "version_comment": "Percona XtraDB Cluster..."}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"wsrep_cluster_name": "supercluster"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{}, value: 134217728, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"level": "REPEATABLE-READ"}, value: 1, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } func TestParseWsrepProviderOptions(t *testing.T) { testE := "" testM := "base_dir = /var/lib/mysql/; base_host = 10.91.142.82; base_port = 4567; cert.log_conflicts = no; debug = no; evs.auto_evict = 
0; evs.causal_keepalive_period = PT1S; evs.debug_log_mask = 0x1; evs.delay_margin = PT1S; evs.delayed_keep_period = PT30S; evs.inactive_check_period = PT0.5S; evs.inactive_timeout = PT15S; evs.info_log_mask = 0; evs.install_timeout = PT7.5S; evs.join_retrans_period = PT1S; evs.keepalive_period = PT1S; evs.max_install_timeouts = 3; evs.send_window = 4; evs.stats_report_period = PT1M; evs.suspect_timeout = PT5S; evs.use_aggregate = true; evs.user_send_window = 2; evs.version = 0; evs.view_forget_timeout = P1D; gcache.dir = /var/lib/mysql/; gcache.keep_pages_count = 0; gcache.keep_pages_size = 0; gcache.mem_size = 0; gcache.name = /var/lib/mysql//galera.cache; gcache.page_size = 128M; gcache.size = 128M; gcomm.thread_prio = ; gcs.fc_debug = 0; gcs.fc_factor = 1.0; gcs.fc_limit = 16; gcs.fc_master_slave = no; gcs.max_packet_size = 64500; gcs.max_throttle = 0.25; gcs.recv_q_hard_limit = 9223372036854775807; gcs.recv_q_soft_limit = 0.25; gcs.sync_donor = no; gmcast.listen_addr = tcp://0.0.0.0:4567; gmcast.mcast_addr = ; gmcast.mcast_ttl = 1; gmcast.peer_timeout = PT3S; gmcast.segment = 0; gmcast.time_wait = PT5S; gmcast.version = 0; ist.recv_addr = 10.91.142.82; pc.announce_timeout = PT3S; pc.checksum = false; pc.ignore_quorum = false; pc.ignore_sb = false; pc.linger = PT20S; pc.npvo = false; pc.recovery = true; pc.version = 0; pc.wait_prim = true; pc.wait_prim_timeout = P30S; pc.weight = 1; protonet.backend = asio; protonet.version = 0; repl.causal_read_timeout = PT30S; repl.commit_order = 3; repl.key_format = FLAT8; repl.max_ws_size = 2147483647; repl.proto_max = 7; socket.checksum = 2; socket.recv_buf_size = 212992;" testG := "base_dir = /var/lib/mysql/; base_host = 10.91.194.244; base_port = 4567; cert.log_conflicts = no; debug = no; evs.auto_evict = 0; evs.causal_keepalive_period = PT1S; evs.debug_log_mask = 0x1; evs.delay_margin = PT1S; evs.delayed_keep_period = PT30S; evs.inactive_check_period = PT0.5S; evs.inactive_timeout = PT15S; evs.info_log_mask = 0; evs.install_timeout = PT7.5S; evs.join_retrans_period = PT1S; evs.keepalive_period = PT1S; evs.max_install_timeouts = 3; evs.send_window = 4; evs.stats_report_period = PT1M; evs.suspect_timeout = PT5S; evs.use_aggregate = true; evs.user_send_window = 2; evs.version = 0; evs.view_forget_timeout = P1D; gcache.dir = /var/lib/mysql/; gcache.keep_pages_count = 0; gcache.keep_pages_size = 0; gcache.mem_size = 0; gcache.name = /var/lib/mysql//galera.cache; gcache.page_size = 128M; gcache.size = 2G; gcomm.thread_prio = ; gcs.fc_debug = 0; gcs.fc_factor = 1.0; gcs.fc_limit = 16; gcs.fc_master_slave = no; gcs.max_packet_size = 64500; gcs.max_throttle = 0.25; gcs.recv_q_hard_limit = 9223372036854775807; gcs.recv_q_soft_limit = 0.25; gcs.sync_donor = no; gmcast.listen_addr = tcp://0.0.0.0:4567; gmcast.mcast_addr = ; gmcast.mcast_ttl = 1; gmcast.peer_timeout = PT3S; gmcast.segment = 0; gmcast.time_wait = PT5S; gmcast.version = 0; ist.recv_addr = 10.91.194.244; pc.announce_timeout = PT3S; pc.checksum = false; pc.ignore_quorum = false; pc.ignore_sb = false; pc.linger = PT20S; pc.npvo = false; pc.recovery = true; pc.version = 0; pc.wait_prim = true; pc.wait_prim_timeout = P30S; pc.weight = 1; protonet.backend = asio; protonet.version = 0; repl.causal_read_timeout = PT30S; repl.commit_order = 3; repl.key_format = FLAT8; repl.max_ws_size = 2147483647; repl.proto_max = 7; socket.checksum = 2; socket.recv_buf_size = 212992;" testB := "gcache.page_size = 128M; gcache.size = 131072; gcomm.thread_prio = ;" convey.Convey("Parse wsrep_provider_options", t, 
func() { convey.So(parseWsrepProviderOptions(testE), convey.ShouldEqual, 0) convey.So(parseWsrepProviderOptions(testM), convey.ShouldEqual, 128*1024*1024) convey.So(parseWsrepProviderOptions(testG), convey.ShouldEqual, int64(2*1024*1024*1024)) convey.So(parseWsrepProviderOptions(testB), convey.ShouldEqual, 131072) }) } mysqld_exporter-0.15.0/collector/heartbeat.go000066400000000000000000000100001444546573200213240ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape heartbeat data. package collector import ( "context" "database/sql" "fmt" "strconv" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const ( // heartbeat is the Metric subsystem we use. heartbeat = "heartbeat" // heartbeatQuery is the query used to fetch the stored and current // timestamps. %s will be replaced by the database and table name. // The second column gets the server timestamp at the exact same // time the query is run. heartbeatQuery = "SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(%s), server_id from `%s`.`%s`" ) var ( collectHeartbeatDatabase = kingpin.Flag( "collect.heartbeat.database", "Database from where to collect heartbeat data", ).Default("heartbeat").String() collectHeartbeatTable = kingpin.Flag( "collect.heartbeat.table", "Table from where to collect heartbeat data", ).Default("heartbeat").String() collectHeartbeatUtc = kingpin.Flag( "collect.heartbeat.utc", "Use UTC for timestamps of the current server (`pt-heartbeat` is called with `--utc`)", ).Bool() ) // Metric descriptors. var ( HeartbeatStoredDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, heartbeat, "stored_timestamp_seconds"), "Timestamp stored in the heartbeat table.", []string{"server_id"}, nil, ) HeartbeatNowDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, heartbeat, "now_timestamp_seconds"), "Timestamp of the current server.", []string{"server_id"}, nil, ) ) // ScrapeHeartbeat scrapes from the heartbeat table. // This is mainly targeting pt-heartbeat, but will work with any heartbeat // implementation that writes to a table with two columns: // CREATE TABLE heartbeat ( // // ts varchar(26) NOT NULL, // server_id int unsigned NOT NULL PRIMARY KEY, // // ); type ScrapeHeartbeat struct{} // Name of the Scraper. Should be unique. func (ScrapeHeartbeat) Name() string { return "heartbeat" } // Help describes the role of the Scraper. func (ScrapeHeartbeat) Help() string { return "Collect from heartbeat" } // Version of MySQL from which scraper is available. func (ScrapeHeartbeat) Version() float64 { return 5.1 } // nowExpr returns a current timestamp expression. func nowExpr() string { if *collectHeartbeatUtc { return "UTC_TIMESTAMP(6)" } return "NOW(6)" } // Scrape collects data from database connection and sends it over channel as prometheus metric. 
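//
// A sketch of how a caller could drive this scraper standalone; ctx, db and
// logger are assumed to exist in the caller, and the loop body is illustrative:
//
//	ch := make(chan prometheus.Metric)
//	go func() {
//		defer close(ch)
//		if err := (ScrapeHeartbeat{}).Scrape(ctx, db, ch, logger); err != nil {
//			logger.Log("msg", "heartbeat scrape failed", "err", err)
//		}
//	}()
//	for m := range ch {
//		_ = m // forward each metric to the exporter's registry
//	}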
func (ScrapeHeartbeat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { query := fmt.Sprintf(heartbeatQuery, nowExpr(), *collectHeartbeatDatabase, *collectHeartbeatTable) heartbeatRows, err := db.QueryContext(ctx, query) if err != nil { return err } defer heartbeatRows.Close() var ( now, ts sql.RawBytes serverId int ) for heartbeatRows.Next() { if err := heartbeatRows.Scan(&ts, &now, &serverId); err != nil { return err } tsFloatVal, err := strconv.ParseFloat(string(ts), 64) if err != nil { return err } nowFloatVal, err := strconv.ParseFloat(string(now), 64) if err != nil { return err } serverId := strconv.Itoa(serverId) ch <- prometheus.MustNewConstMetric( HeartbeatNowDesc, prometheus.GaugeValue, nowFloatVal, serverId, ) ch <- prometheus.MustNewConstMetric( HeartbeatStoredDesc, prometheus.GaugeValue, tsFloatVal, serverId, ) } return nil } // check interface var _ Scraper = ScrapeHeartbeat{} mysqld_exporter-0.15.0/collector/heartbeat_test.go000066400000000000000000000060161444546573200223720ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "fmt" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) type ScrapeHeartbeatTestCase struct { Args []string Columns []string Query string } var ScrapeHeartbeatTestCases = []ScrapeHeartbeatTestCase{ { []string{ "--collect.heartbeat.database", "heartbeat-test", "--collect.heartbeat.table", "heartbeat-test", }, []string{"UNIX_TIMESTAMP(ts)", "UNIX_TIMESTAMP(NOW(6))", "server_id"}, "SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(NOW(6)), server_id from `heartbeat-test`.`heartbeat-test`", }, { []string{ "--collect.heartbeat.database", "heartbeat-test", "--collect.heartbeat.table", "heartbeat-test", "--collect.heartbeat.utc", }, []string{"UNIX_TIMESTAMP(ts)", "UNIX_TIMESTAMP(UTC_TIMESTAMP(6))", "server_id"}, "SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(UTC_TIMESTAMP(6)), server_id from `heartbeat-test`.`heartbeat-test`", }, } func TestScrapeHeartbeat(t *testing.T) { for _, tt := range ScrapeHeartbeatTestCases { t.Run(fmt.Sprint(tt.Args), func(t *testing.T) { _, err := kingpin.CommandLine.Parse(tt.Args) if err != nil { t.Fatal(err) } db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() rows := sqlmock.NewRows(tt.Columns). 
AddRow("1487597613.001320", "1487598113.448042", 1) mock.ExpectQuery(sanitizeQuery(tt.Query)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeHeartbeat{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{"server_id": "1"}, value: 1487598113.448042, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"server_id": "1"}, value: 1487597613.00132, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } }) } } mysqld_exporter-0.15.0/collector/info_schema.go000066400000000000000000000012321444546573200216420ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector // Subsystem. const informationSchema = "info_schema" mysqld_exporter-0.15.0/collector/info_schema_auto_increment.go000066400000000000000000000064341444546573200247470ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape auto_increment column information. package collector import ( "context" "database/sql" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const infoSchemaAutoIncrementQuery = ` SELECT table_schema, table_name, column_name, auto_increment, pow(2, case data_type when 'tinyint' then 7 when 'smallint' then 15 when 'mediumint' then 23 when 'int' then 31 when 'bigint' then 63 end+(column_type like '% unsigned'))-1 as max_int FROM information_schema.columns c STRAIGHT_JOIN information_schema.tables t USING (table_schema,table_name) WHERE c.extra = 'auto_increment' AND t.auto_increment IS NOT NULL ` // Metric descriptors. 
var ( globalInfoSchemaAutoIncrementDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "auto_increment_column"), "The current value of an auto_increment column from information_schema.", []string{"schema", "table", "column"}, nil, ) globalInfoSchemaAutoIncrementMaxDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "auto_increment_column_max"), "The max value of an auto_increment column from information_schema.", []string{"schema", "table", "column"}, nil, ) ) // ScrapeAutoIncrementColumns collects auto_increment column information. type ScrapeAutoIncrementColumns struct{} // Name of the Scraper. Should be unique. func (ScrapeAutoIncrementColumns) Name() string { return "auto_increment.columns" } // Help describes the role of the Scraper. func (ScrapeAutoIncrementColumns) Help() string { return "Collect auto_increment columns and max values from information_schema" } // Version of MySQL from which scraper is available. func (ScrapeAutoIncrementColumns) Version() float64 { return 5.1 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeAutoIncrementColumns) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { autoIncrementRows, err := db.QueryContext(ctx, infoSchemaAutoIncrementQuery) if err != nil { return err } defer autoIncrementRows.Close() var ( schema, table, column string value, max float64 ) for autoIncrementRows.Next() { if err := autoIncrementRows.Scan( &schema, &table, &column, &value, &max, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( globalInfoSchemaAutoIncrementDesc, prometheus.GaugeValue, value, schema, table, column, ) ch <- prometheus.MustNewConstMetric( globalInfoSchemaAutoIncrementMaxDesc, prometheus.GaugeValue, max, schema, table, column, ) } return nil } // check interface var _ Scraper = ScrapeAutoIncrementColumns{} mysqld_exporter-0.15.0/collector/info_schema_clientstats.go000066400000000000000000000263471444546573200242750ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `information_schema.client_statistics`. package collector import ( "context" "database/sql" "fmt" "strings" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) const clientStatQuery = `SELECT * FROM information_schema.client_statistics` var ( // Map known client-statistics values to types. Unknown types will be mapped as // untyped. 
informationSchemaClientStatisticsTypes = map[string]struct { vtype prometheus.ValueType desc *prometheus.Desc }{ "TOTAL_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_total_connections"), "The number of connections created for this client.", []string{"client"}, nil)}, "CONCURRENT_CONNECTIONS": {prometheus.GaugeValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_concurrent_connections"), "The number of concurrent connections for this client.", []string{"client"}, nil)}, "CONNECTED_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_connected_time_seconds_total"), "The cumulative number of seconds elapsed while there were connections from this client.", []string{"client"}, nil)}, "BUSY_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_busy_seconds_total"), "The cumulative number of seconds there was activity on connections from this client.", []string{"client"}, nil)}, "CPU_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_cpu_time_seconds_total"), "The cumulative CPU time elapsed, in seconds, while servicing this client's connections.", []string{"client"}, nil)}, "BYTES_RECEIVED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_bytes_received_total"), "The number of bytes received from this client’s connections.", []string{"client"}, nil)}, "BYTES_SENT": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_bytes_sent_total"), "The number of bytes sent to this client’s connections.", []string{"client"}, nil)}, "BINLOG_BYTES_WRITTEN": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_binlog_bytes_written_total"), "The number of bytes written to the binary log from this client’s connections.", []string{"client"}, nil)}, "ROWS_READ": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_read_total"), "The number of rows read by this client’s connections.", []string{"client"}, nil)}, "ROWS_SENT": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_sent_total"), "The number of rows sent by this client’s connections.", []string{"client"}, nil)}, "ROWS_DELETED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_deleted_total"), "The number of rows deleted by this client’s connections.", []string{"client"}, nil)}, "ROWS_INSERTED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_inserted_total"), "The number of rows inserted by this client’s connections.", []string{"client"}, nil)}, "ROWS_FETCHED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_fetched_total"), "The number of rows fetched by this client’s connections.", []string{"client"}, nil)}, "ROWS_UPDATED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rows_updated_total"), "The 
number of rows updated by this client’s connections.", []string{"client"}, nil)}, "TABLE_ROWS_READ": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_table_rows_read_total"), "The number of rows read from tables by this client’s connections. (It may be different from ROWS_FETCHED.)", []string{"client"}, nil)}, "SELECT_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_select_commands_total"), "The number of SELECT commands executed from this client’s connections.", []string{"client"}, nil)}, "UPDATE_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_update_commands_total"), "The number of UPDATE commands executed from this client’s connections.", []string{"client"}, nil)}, "OTHER_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_other_commands_total"), "The number of other commands executed from this client’s connections.", []string{"client"}, nil)}, "COMMIT_TRANSACTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_commit_transactions_total"), "The number of COMMIT commands issued by this client’s connections.", []string{"client"}, nil)}, "ROLLBACK_TRANSACTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_rollback_transactions_total"), "The number of ROLLBACK commands issued by this client’s connections.", []string{"client"}, nil)}, "DENIED_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_denied_connections_total"), "The number of connections denied to this client.", []string{"client"}, nil)}, "LOST_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_lost_connections_total"), "The number of this client’s connections that were terminated uncleanly.", []string{"client"}, nil)}, "ACCESS_DENIED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_access_denied_total"), "The number of times this client’s connections issued commands that were denied.", []string{"client"}, nil)}, "EMPTY_QUERIES": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_empty_queries_total"), "The number of times this client’s connections sent empty queries to the server.", []string{"client"}, nil)}, "TOTAL_SSL_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_total_ssl_connections_total"), "The number of times this client’s connections connected using SSL to the server.", []string{"client"}, nil)}, "MAX_STATEMENT_TIME_EXCEEDED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "client_statistics_max_statement_time_exceeded_total"), "The number of times a statement was aborted, because it was executed longer than its MAX_STATEMENT_TIME threshold.", []string{"client"}, nil)}, } ) // ScrapeClientStat collects from `information_schema.client_statistics`. type ScrapeClientStat struct{} // Name of the Scraper. Should be unique. 
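//
// Note: information_schema.client_statistics is only populated when the
// server runs with userstat enabled (a MariaDB / Percona Server feature);
// Scrape below checks this and returns early otherwise. One way to verify
// the setting might be:
//
//	SHOW GLOBAL VARIABLES LIKE 'userstat';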
func (ScrapeClientStat) Name() string { return "info_schema.clientstats" } // Help describes the role of the Scraper. func (ScrapeClientStat) Help() string { return "If running with userstat=1, set to true to collect client statistics" } // Version of MySQL from which scraper is available. func (ScrapeClientStat) Version() float64 { return 5.5 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeClientStat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { var varName, varVal string err := db.QueryRowContext(ctx, userstatCheckQuery).Scan(&varName, &varVal) if err != nil { level.Debug(logger).Log("msg", "Detailed client stats are not available.") return nil } if varVal == "OFF" { level.Debug(logger).Log("msg", "MySQL variable is OFF.", "var", varName) return nil } informationSchemaClientStatisticsRows, err := db.QueryContext(ctx, clientStatQuery) if err != nil { return err } defer informationSchemaClientStatisticsRows.Close() // The client column is assumed to be column[0], while all other data is assumed to be coercible to float64. // Because of the client column, clientStatData[0] maps to columnNames[1] when reading off the metrics // (because clientStatScanArgs is mapped as [ &client, &clientStatData[0], &clientStatData[1] ... &clientStatData[n] ] // To map metrics to names therefore we always range over columnNames[1:] columnNames, err := informationSchemaClientStatisticsRows.Columns() if err != nil { return err } var ( client string // Holds the client name, which should be in column 0. clientStatData = make([]float64, len(columnNames)-1) // 1 less because of the client column. clientStatScanArgs = make([]interface{}, len(columnNames)) ) clientStatScanArgs[0] = &client for i := range clientStatData { clientStatScanArgs[i+1] = &clientStatData[i] } for informationSchemaClientStatisticsRows.Next() { if err := informationSchemaClientStatisticsRows.Scan(clientStatScanArgs...); err != nil { return err } // Loop over column names, and match to scan data. Unknown columns // will be filled with an untyped metric number. We assume that, other than // client, we'll only get numbers. for idx, columnName := range columnNames[1:] { if metricType, ok := informationSchemaClientStatisticsTypes[columnName]; ok { ch <- prometheus.MustNewConstMetric(metricType.desc, metricType.vtype, float64(clientStatData[idx]), client) } else { // Unknown metric. Report as untyped. desc := prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, fmt.Sprintf("client_statistics_%s", strings.ToLower(columnName))), fmt.Sprintf("Unsupported metric from column %s", columnName), []string{"client"}, nil) ch <- prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, float64(clientStatData[idx]), client) } } } return nil } // check interface var _ Scraper = ScrapeClientStat{} mysqld_exporter-0.15.0/collector/info_schema_clientstats_test.go000066400000000000000000000107231444546573200253230ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapeClientStat(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() mock.ExpectQuery(sanitizeQuery(userstatCheckQuery)).WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}). AddRow("userstat", "ON")) columns := []string{"CLIENT", "TOTAL_CONNECTIONS", "CONCURRENT_CONNECTIONS", "CONNECTED_TIME", "BUSY_TIME", "CPU_TIME", "BYTES_RECEIVED", "BYTES_SENT", "BINLOG_BYTES_WRITTEN", "ROWS_READ", "ROWS_SENT", "ROWS_DELETED", "ROWS_INSERTED", "ROWS_UPDATED", "SELECT_COMMANDS", "UPDATE_COMMANDS", "OTHER_COMMANDS", "COMMIT_TRANSACTIONS", "ROLLBACK_TRANSACTIONS", "DENIED_CONNECTIONS", "LOST_CONNECTIONS", "ACCESS_DENIED", "EMPTY_QUERIES"} rows := sqlmock.NewRows(columns). AddRow("localhost", 1002, 0, 127027, 286, 245, float64(2565104853), 21090856, float64(2380108042), 767691, 1764, 8778, 1210741, 0, 1764, 1214416, 293, 2430888, 0, 0, 0, 0, 0) mock.ExpectQuery(sanitizeQuery(clientStatQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeClientStat{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"client": "localhost"}, value: 1002, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"client": "localhost"}, value: 127027, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 286, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 245, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: float64(2565104853), metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 21090856, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: float64(2380108042), metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 767691, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 1764, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 8778, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 1210741, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 1764, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 1214416, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 293, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 2430888, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: 
dto.MetricType_COUNTER}, {labels: labelMap{"client": "localhost"}, value: 0, metricType: dto.MetricType_COUNTER}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } mysqld_exporter-0.15.0/collector/info_schema_innodb_cmp.go000066400000000000000000000102051444546573200240320ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `information_schema.INNODB_CMP`. package collector import ( "context" "database/sql" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const innodbCmpQuery = ` SELECT page_size, compress_ops, compress_ops_ok, compress_time, uncompress_ops, uncompress_time FROM information_schema.innodb_cmp ` // Metric descriptors. var ( infoSchemaInnodbCmpCompressOps = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_ops_total"), "Number of times a B-tree page of the size PAGE_SIZE has been compressed.", []string{"page_size"}, nil, ) infoSchemaInnodbCmpCompressOpsOk = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_ops_ok_total"), "Number of times a B-tree page of the size PAGE_SIZE has been successfully compressed.", []string{"page_size"}, nil, ) infoSchemaInnodbCmpCompressTime = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_time_seconds_total"), "Total time in seconds spent in attempts to compress B-tree pages.", []string{"page_size"}, nil, ) infoSchemaInnodbCmpUncompressOps = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_uncompress_ops_total"), "Number of times a B-tree page of the size PAGE_SIZE has been uncompressed.", []string{"page_size"}, nil, ) infoSchemaInnodbCmpUncompressTime = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_uncompress_time_seconds_total"), "Total time in seconds spent in uncompressing B-tree pages.", []string{"page_size"}, nil, ) ) // ScrapeInnodbCmp collects from `information_schema.innodb_cmp`. type ScrapeInnodbCmp struct{} // Name of the Scraper. Should be unique. func (ScrapeInnodbCmp) Name() string { return informationSchema + ".innodb_cmp" } // Help describes the role of the Scraper. func (ScrapeInnodbCmp) Help() string { return "Collect metrics from information_schema.innodb_cmp" } // Version of MySQL from which scraper is available. func (ScrapeInnodbCmp) Version() float64 { return 5.5 } // Scrape collects data from database connection and sends it over channel as prometheus metric. 
func (ScrapeInnodbCmp) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { informationSchemaInnodbCmpRows, err := db.QueryContext(ctx, innodbCmpQuery) if err != nil { return err } defer informationSchemaInnodbCmpRows.Close() var ( page_size string compress_ops, compress_ops_ok, compress_time, uncompress_ops, uncompress_time float64 ) for informationSchemaInnodbCmpRows.Next() { if err := informationSchemaInnodbCmpRows.Scan( &page_size, &compress_ops, &compress_ops_ok, &compress_time, &uncompress_ops, &uncompress_time, ); err != nil { return err } ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressOps, prometheus.CounterValue, compress_ops, page_size) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressOpsOk, prometheus.CounterValue, compress_ops_ok, page_size) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressTime, prometheus.CounterValue, compress_time, page_size) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpUncompressOps, prometheus.CounterValue, uncompress_ops, page_size) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpUncompressTime, prometheus.CounterValue, uncompress_time, page_size) } return nil } // check interface var _ Scraper = ScrapeInnodbCmp{} mysqld_exporter-0.15.0/collector/info_schema_innodb_cmp_test.go000066400000000000000000000044221444546573200250750ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapeInnodbCmp(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"page_size", "compress_ops", "compress_ops_ok", "compress_time", "uncompress_ops", "uncompress_time"} rows := sqlmock.NewRows(columns). 
AddRow("1024", 10, 20, 30, 40, 50) mock.ExpectQuery(sanitizeQuery(innodbCmpQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeInnodbCmp{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"page_size": "1024"}, value: 10, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"page_size": "1024"}, value: 20, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"page_size": "1024"}, value: 30, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"page_size": "1024"}, value: 40, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"page_size": "1024"}, value: 50, metricType: dto.MetricType_COUNTER}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } mysqld_exporter-0.15.0/collector/info_schema_innodb_cmpmem.go000066400000000000000000000076441444546573200245460ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `information_schema.INNODB_CMPMEM`. package collector import ( "context" "database/sql" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const innodbCmpMemQuery = ` SELECT page_size, buffer_pool_instance, pages_used, pages_free, relocation_ops, relocation_time FROM information_schema.innodb_cmpmem ` // Metric descriptors. var ( infoSchemaInnodbCmpMemPagesRead = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_pages_used_total"), "Number of blocks of the size PAGE_SIZE that are currently in use.", []string{"page_size", "buffer_pool"}, nil, ) infoSchemaInnodbCmpMemPagesFree = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_pages_free_total"), "Number of blocks of the size PAGE_SIZE that are currently available for allocation.", []string{"page_size", "buffer_pool"}, nil, ) infoSchemaInnodbCmpMemRelocationOps = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_relocation_ops_total"), "Number of times a block of the size PAGE_SIZE has been relocated.", []string{"page_size", "buffer_pool"}, nil, ) infoSchemaInnodbCmpMemRelocationTime = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_relocation_time_seconds_total"), "Total time in seconds spent in relocating blocks.", []string{"page_size", "buffer_pool"}, nil, ) ) // ScrapeInnodbCmp collects from `information_schema.innodb_cmp`. type ScrapeInnodbCmpMem struct{} // Name of the Scraper. Should be unique. func (ScrapeInnodbCmpMem) Name() string { return informationSchema + ".innodb_cmpmem" } // Help describes the role of the Scraper. 
func (ScrapeInnodbCmpMem) Help() string { return "Collect metrics from information_schema.innodb_cmpmem" } // Version of MySQL from which scraper is available. func (ScrapeInnodbCmpMem) Version() float64 { return 5.5 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeInnodbCmpMem) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { informationSchemaInnodbCmpMemRows, err := db.QueryContext(ctx, innodbCmpMemQuery) if err != nil { return err } defer informationSchemaInnodbCmpMemRows.Close() var ( page_size, buffer_pool string pages_used, pages_free, relocation_ops, relocation_time float64 ) for informationSchemaInnodbCmpMemRows.Next() { if err := informationSchemaInnodbCmpMemRows.Scan( &page_size, &buffer_pool, &pages_used, &pages_free, &relocation_ops, &relocation_time, ); err != nil { return err } ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemPagesRead, prometheus.CounterValue, pages_used, page_size, buffer_pool) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemPagesFree, prometheus.CounterValue, pages_free, page_size, buffer_pool) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemRelocationOps, prometheus.CounterValue, relocation_ops, page_size, buffer_pool) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemRelocationTime, prometheus.CounterValue, (relocation_time / 1000), page_size, buffer_pool) } return nil } // check interface var _ Scraper = ScrapeInnodbCmpMem{} mysqld_exporter-0.15.0/collector/info_schema_innodb_cmpmem_test.go000066400000000000000000000044121444546573200255730ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapeInnodbCmpMem(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"page_size", "buffer_pool", "pages_used", "pages_free", "relocation_ops", "relocation_time"} rows := sqlmock.NewRows(columns). 
AddRow("1024", "0", 30, 40, 50, 6000) mock.ExpectQuery(sanitizeQuery(innodbCmpMemQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeInnodbCmpMem{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"page_size": "1024", "buffer_pool": "0"}, value: 30, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"page_size": "1024", "buffer_pool": "0"}, value: 40, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"page_size": "1024", "buffer_pool": "0"}, value: 50, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"page_size": "1024", "buffer_pool": "0"}, value: 6, metricType: dto.MetricType_COUNTER}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } mysqld_exporter-0.15.0/collector/info_schema_innodb_metrics.go000066400000000000000000000141431444546573200247260ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `information_schema.innodb_metrics`. package collector import ( "context" "database/sql" "errors" "fmt" "regexp" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) const infoSchemaInnodbMetricsEnabledColumnQuery = ` SELECT column_name FROM information_schema.columns WHERE table_schema = 'information_schema' AND table_name = 'INNODB_METRICS' AND column_name IN ('status', 'enabled') LIMIT 1 ` const infoSchemaInnodbMetricsQuery = ` SELECT name, subsystem, type, comment, count FROM information_schema.innodb_metrics WHERE ` + "`%s` = '%s'" // Metrics descriptors. var ( infoSchemaBufferPageReadTotalDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_metrics_buffer_page_read_total"), "Total number of buffer pages read total.", []string{"type"}, nil, ) infoSchemaBufferPageWrittenTotalDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_metrics_buffer_page_written_total"), "Total number of buffer pages written total.", []string{"type"}, nil, ) infoSchemaBufferPoolPagesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_metrics_buffer_pool_pages"), "Total number of buffer pool pages by state.", []string{"state"}, nil, ) infoSchemaBufferPoolPagesDirtyDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_metrics_buffer_pool_dirty_pages"), "Total number of dirty pages in the buffer pool.", nil, nil, ) ) // Regexp for matching metric aggregations. 
var ( bufferRE = regexp.MustCompile(`^buffer_(pool_pages)_(.*)$`) bufferPageRE = regexp.MustCompile(`^buffer_page_(read|written)_(.*)$`) ) // ScrapeInnodbMetrics collects from `information_schema.innodb_metrics`. type ScrapeInnodbMetrics struct{} // Name of the Scraper. Should be unique. func (ScrapeInnodbMetrics) Name() string { return informationSchema + ".innodb_metrics" } // Help describes the role of the Scraper. func (ScrapeInnodbMetrics) Help() string { return "Collect metrics from information_schema.innodb_metrics" } // Version of MySQL from which scraper is available. func (ScrapeInnodbMetrics) Version() float64 { return 5.6 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeInnodbMetrics) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { var enabledColumnName string var query string err := db.QueryRowContext(ctx, infoSchemaInnodbMetricsEnabledColumnQuery).Scan(&enabledColumnName) if err != nil { return err } switch enabledColumnName { case "STATUS": query = fmt.Sprintf(infoSchemaInnodbMetricsQuery, "status", "enabled") case "ENABLED": query = fmt.Sprintf(infoSchemaInnodbMetricsQuery, "enabled", "1") default: return errors.New("Couldn't find column STATUS or ENABLED in innodb_metrics table.") } innodbMetricsRows, err := db.QueryContext(ctx, query) if err != nil { return err } defer innodbMetricsRows.Close() var ( name, subsystem, metricType, comment string value float64 ) for innodbMetricsRows.Next() { if err := innodbMetricsRows.Scan( &name, &subsystem, &metricType, &comment, &value, ); err != nil { return err } // Special handling of the "buffer_page_io" subsystem. if subsystem == "buffer_page_io" { match := bufferPageRE.FindStringSubmatch(name) if len(match) != 3 { level.Warn(logger).Log("msg", "innodb_metrics subsystem buffer_page_io returned an invalid name", "name", name) continue } switch match[1] { case "read": ch <- prometheus.MustNewConstMetric( infoSchemaBufferPageReadTotalDesc, prometheus.CounterValue, value, match[2], ) case "written": ch <- prometheus.MustNewConstMetric( infoSchemaBufferPageWrittenTotalDesc, prometheus.CounterValue, value, match[2], ) } continue } if subsystem == "buffer" { match := bufferRE.FindStringSubmatch(name) // Many buffer subsystem metrics are not matched, fall through to generic metric. if match != nil { switch match[1] { case "pool_pages": switch match[2] { case "total": // Ignore total, it is an aggregation of the rest. continue case "dirty": // Dirty pages are a separate metric, not in the total. ch <- prometheus.MustNewConstMetric( infoSchemaBufferPoolPagesDirtyDesc, prometheus.GaugeValue, value, ) default: ch <- prometheus.MustNewConstMetric( infoSchemaBufferPoolPagesDesc, prometheus.GaugeValue, value, match[2], ) } } continue } } metricName := "innodb_metrics_" + subsystem + "_" + name // MySQL returns counters named two different ways. 
"counter" and "status_counter" // value >= 0 is necessary due to upstream bugs: http://bugs.mysql.com/bug.php?id=75966 if (metricType == "counter" || metricType == "status_counter") && value >= 0 { description := prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, metricName+"_total"), comment, nil, nil, ) ch <- prometheus.MustNewConstMetric( description, prometheus.CounterValue, value, ) } else { description := prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, metricName), comment, nil, nil, ) ch <- prometheus.MustNewConstMetric( description, prometheus.GaugeValue, value, ) } } return nil } // check interface var _ Scraper = ScrapeInnodbMetrics{} mysqld_exporter-0.15.0/collector/info_schema_innodb_metrics_test.go000066400000000000000000000066731444546573200257760ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "fmt" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapeInnodbMetrics(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() enabledColumnName := []string{"COLUMN_NAME"} rows := sqlmock.NewRows(enabledColumnName). AddRow("STATUS") mock.ExpectQuery(sanitizeQuery(infoSchemaInnodbMetricsEnabledColumnQuery)).WillReturnRows(rows) columns := []string{"name", "subsystem", "type", "comment", "count"} rows = sqlmock.NewRows(columns). AddRow("lock_timeouts", "lock", "counter", "Number of lock timeouts", 0). AddRow("buffer_pool_reads", "buffer", "status_counter", "Number of reads directly from disk (innodb_buffer_pool_reads)", 1). AddRow("buffer_pool_size", "server", "value", "Server buffer pool size (all buffer pools) in bytes", 2). AddRow("buffer_page_read_system_page", "buffer_page_io", "counter", "Number of System Pages read", 3). AddRow("buffer_page_written_undo_log", "buffer_page_io", "counter", "Number of Undo Log Pages written", 4). AddRow("buffer_pool_pages_dirty", "buffer", "gauge", "Number of dirt buffer pool pages", 5). AddRow("buffer_pool_pages_data", "buffer", "gauge", "Number of data buffer pool pages", 6). AddRow("buffer_pool_pages_total", "buffer", "gauge", "Number of total buffer pool pages", 7). 
AddRow("NOPE", "buffer_page_io", "counter", "An invalid buffer_page_io metric", 999) query := fmt.Sprintf(infoSchemaInnodbMetricsQuery, "status", "enabled") mock.ExpectQuery(sanitizeQuery(query)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeInnodbMetrics{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() metricExpected := []MetricResult{ {labels: labelMap{}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{}, value: 1, metricType: dto.MetricType_COUNTER}, {labels: labelMap{}, value: 2, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"type": "system_page"}, value: 3, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"type": "undo_log"}, value: 4, metricType: dto.MetricType_COUNTER}, {labels: labelMap{}, value: 5, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"state": "data"}, value: 6, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range metricExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } mysqld_exporter-0.15.0/collector/info_schema_innodb_sys_tablespaces.go000066400000000000000000000111341444546573200264410ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `information_schema.innodb_sys_tablespaces`. package collector import ( "context" "database/sql" "errors" "fmt" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const innodbTablespacesTablenameQuery = ` SELECT table_name FROM information_schema.tables WHERE table_name = 'INNODB_SYS_TABLESPACES' OR table_name = 'INNODB_TABLESPACES' ` const innodbTablespacesQuery = ` SELECT SPACE, NAME, ifnull((SELECT column_name FROM information_schema.COLUMNS WHERE TABLE_SCHEMA = 'information_schema' AND TABLE_NAME = ` + "'%s'" + ` AND COLUMN_NAME = 'FILE_FORMAT' LIMIT 1), 'NONE') as FILE_FORMAT, ifnull(ROW_FORMAT, 'NONE') as ROW_FORMAT, ifnull(SPACE_TYPE, 'NONE') as SPACE_TYPE, FILE_SIZE, ALLOCATED_SIZE FROM information_schema.` + "`%s`" // Metric descriptors. 
var ( infoSchemaInnodbTablesspaceInfoDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_tablespace_space_info"), "The Tablespace information and Space ID.", []string{"tablespace_name", "file_format", "row_format", "space_type"}, nil, ) infoSchemaInnodbTablesspaceFileSizeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_tablespace_file_size_bytes"), "The apparent size of the file, which represents the maximum size of the file, uncompressed.", []string{"tablespace_name"}, nil, ) infoSchemaInnodbTablesspaceAllocatedSizeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "innodb_tablespace_allocated_size_bytes"), "The actual size of the file, which is the amount of space allocated on disk.", []string{"tablespace_name"}, nil, ) ) // ScrapeInfoSchemaInnodbTablespaces collects from `information_schema.innodb_sys_tablespaces`. type ScrapeInfoSchemaInnodbTablespaces struct{} // Name of the Scraper. Should be unique. func (ScrapeInfoSchemaInnodbTablespaces) Name() string { return informationSchema + ".innodb_tablespaces" } // Help describes the role of the Scraper. func (ScrapeInfoSchemaInnodbTablespaces) Help() string { return "Collect metrics from information_schema.innodb_sys_tablespaces" } // Version of MySQL from which scraper is available. func (ScrapeInfoSchemaInnodbTablespaces) Version() float64 { return 5.7 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeInfoSchemaInnodbTablespaces) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { var tablespacesTablename string var query string err := db.QueryRowContext(ctx, innodbTablespacesTablenameQuery).Scan(&tablespacesTablename) if err != nil { return err } switch tablespacesTablename { case "INNODB_SYS_TABLESPACES", "INNODB_TABLESPACES": query = fmt.Sprintf(innodbTablespacesQuery, tablespacesTablename, tablespacesTablename) default: return errors.New("Couldn't find INNODB_SYS_TABLESPACES or INNODB_TABLESPACES in information_schema.") } tablespacesRows, err := db.QueryContext(ctx, query) if err != nil { return err } defer tablespacesRows.Close() var ( tableSpace uint32 tableName string fileFormat string rowFormat string spaceType string fileSize uint64 allocatedSize uint64 ) for tablespacesRows.Next() { err = tablespacesRows.Scan( &tableSpace, &tableName, &fileFormat, &rowFormat, &spaceType, &fileSize, &allocatedSize, ) if err != nil { return err } ch <- prometheus.MustNewConstMetric( infoSchemaInnodbTablesspaceInfoDesc, prometheus.GaugeValue, float64(tableSpace), tableName, fileFormat, rowFormat, spaceType, ) ch <- prometheus.MustNewConstMetric( infoSchemaInnodbTablesspaceFileSizeDesc, prometheus.GaugeValue, float64(fileSize), tableName, ) ch <- prometheus.MustNewConstMetric( infoSchemaInnodbTablesspaceAllocatedSizeDesc, prometheus.GaugeValue, float64(allocatedSize), tableName, ) } return nil } // check interface var _ Scraper = ScrapeInfoSchemaInnodbTablespaces{} mysqld_exporter-0.15.0/collector/info_schema_innodb_sys_tablespaces_test.go000066400000000000000000000060601444546573200275020ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "fmt" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapeInfoSchemaInnodbTablespaces(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"TABLE_NAME"} rows := sqlmock.NewRows(columns). AddRow("INNODB_SYS_TABLESPACES") mock.ExpectQuery(sanitizeQuery(innodbTablespacesTablenameQuery)).WillReturnRows(rows) tablespacesTablename := "INNODB_SYS_TABLESPACES" columns = []string{"SPACE", "NAME", "FILE_FORMAT", "ROW_FORMAT", "SPACE_TYPE", "FILE_SIZE", "ALLOCATED_SIZE"} rows = sqlmock.NewRows(columns). AddRow(1, "sys/sys_config", "Barracuda", "Dynamic", "Single", 100, 100). AddRow(2, "db/compressed", "Barracuda", "Compressed", "Single", 300, 200) query := fmt.Sprintf(innodbTablespacesQuery, tablespacesTablename, tablespacesTablename) mock.ExpectQuery(sanitizeQuery(query)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeInfoSchemaInnodbTablespaces{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"tablespace_name": "sys/sys_config", "file_format": "Barracuda", "row_format": "Dynamic", "space_type": "Single"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"tablespace_name": "sys/sys_config"}, value: 100, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"tablespace_name": "sys/sys_config"}, value: 100, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"tablespace_name": "db/compressed", "file_format": "Barracuda", "row_format": "Compressed", "space_type": "Single"}, value: 2, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"tablespace_name": "db/compressed"}, value: 300, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"tablespace_name": "db/compressed"}, value: 200, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } mysqld_exporter-0.15.0/collector/info_schema_processlist.go000077500000000000000000000141111444546573200242770ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and // limitations under the License. // Scrape `information_schema.processlist`. package collector import ( "context" "database/sql" "fmt" "reflect" "sort" "strings" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const infoSchemaProcesslistQuery = ` SELECT user, SUBSTRING_INDEX(host, ':', 1) AS host, COALESCE(command, '') AS command, COALESCE(state, '') AS state, COUNT(*) AS processes, SUM(time) AS seconds FROM information_schema.processlist WHERE ID != connection_id() AND TIME >= %d GROUP BY user, SUBSTRING_INDEX(host, ':', 1), command, state ` // Tunable flags. var ( processlistMinTime = kingpin.Flag( "collect.info_schema.processlist.min_time", "Minimum time a thread must be in each state to be counted", ).Default("0").Int() processesByUserFlag = kingpin.Flag( "collect.info_schema.processlist.processes_by_user", "Enable collecting the number of processes by user", ).Default("true").Bool() processesByHostFlag = kingpin.Flag( "collect.info_schema.processlist.processes_by_host", "Enable collecting the number of processes by host", ).Default("true").Bool() ) // Metric descriptors. var ( processlistCountDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "processlist_threads"), "The number of threads split by current state.", []string{"command", "state"}, nil) processlistTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "processlist_seconds"), "The number of seconds threads have used split by current state.", []string{"command", "state"}, nil) processesByUserDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "processlist_processes_by_user"), "The number of processes by user.", []string{"mysql_user"}, nil) processesByHostDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "processlist_processes_by_host"), "The number of processes by host.", []string{"client_host"}, nil) ) // ScrapeProcesslist collects from `information_schema.processlist`. type ScrapeProcesslist struct{} // Name of the Scraper. Should be unique. func (ScrapeProcesslist) Name() string { return informationSchema + ".processlist" } // Help describes the role of the Scraper. func (ScrapeProcesslist) Help() string { return "Collect current thread state counts from the information_schema.processlist" } // Version of MySQL from which scraper is available. func (ScrapeProcesslist) Version() float64 { return 5.1 } // Scrape collects data from database connection and sends it over channel as prometheus metric. 
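//
// As a hedged sketch of the resulting exposition (label values are
// illustrative and assume the package-level namespace = "mysql"):
//
//   mysql_info_schema_processlist_threads{command="sleep",state="unknown"} 25
//   mysql_info_schema_processlist_seconds{command="sleep",state="unknown"} 1025
//   mysql_info_schema_processlist_processes_by_user{mysql_user="manager"} 14
//   mysql_info_schema_processlist_processes_by_host{client_host="10.0.7.234"} 14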
func (ScrapeProcesslist) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { processQuery := fmt.Sprintf( infoSchemaProcesslistQuery, *processlistMinTime, ) processlistRows, err := db.QueryContext(ctx, processQuery) if err != nil { return err } defer processlistRows.Close() var ( user string host string command string state string count uint32 time uint32 ) // Define maps stateCounts := make(map[string]map[string]uint32) stateTime := make(map[string]map[string]uint32) stateHostCounts := make(map[string]uint32) stateUserCounts := make(map[string]uint32) for processlistRows.Next() { err = processlistRows.Scan(&user, &host, &command, &state, &count, &time) if err != nil { return err } command = sanitizeState(command) state = sanitizeState(state) if host == "" { host = "unknown" } // Init maps if _, ok := stateCounts[command]; !ok { stateCounts[command] = make(map[string]uint32) stateTime[command] = make(map[string]uint32) } if _, ok := stateCounts[command][state]; !ok { stateCounts[command][state] = 0 stateTime[command][state] = 0 } if _, ok := stateHostCounts[host]; !ok { stateHostCounts[host] = 0 } if _, ok := stateUserCounts[user]; !ok { stateUserCounts[user] = 0 } stateCounts[command][state] += count stateTime[command][state] += time stateHostCounts[host] += count stateUserCounts[user] += count } for _, command := range sortedMapKeys(stateCounts) { for _, state := range sortedMapKeys(stateCounts[command]) { ch <- prometheus.MustNewConstMetric(processlistCountDesc, prometheus.GaugeValue, float64(stateCounts[command][state]), command, state) ch <- prometheus.MustNewConstMetric(processlistTimeDesc, prometheus.GaugeValue, float64(stateTime[command][state]), command, state) } } if *processesByHostFlag { for _, host := range sortedMapKeys(stateHostCounts) { ch <- prometheus.MustNewConstMetric(processesByHostDesc, prometheus.GaugeValue, float64(stateHostCounts[host]), host) } } if *processesByUserFlag { for _, user := range sortedMapKeys(stateUserCounts) { ch <- prometheus.MustNewConstMetric(processesByUserDesc, prometheus.GaugeValue, float64(stateUserCounts[user]), user) } } return nil } func sortedMapKeys(m interface{}) []string { v := reflect.ValueOf(m) keys := make([]string, 0, len(v.MapKeys())) for _, key := range v.MapKeys() { keys = append(keys, key.String()) } sort.Strings(keys) return keys } func sanitizeState(state string) string { if state == "" { state = "unknown" } state = strings.ToLower(state) replacements := map[string]string{ ";": "", ",": "", ":": "", ".": "", "(": "", ")": "", " ": "_", "-": "_", } for r := range replacements { state = strings.Replace(state, r, replacements[r], -1) } return state } // check interface var _ Scraper = ScrapeProcesslist{} mysqld_exporter-0.15.0/collector/info_schema_processlist_test.go000066400000000000000000000111461444546573200253400ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package collector import ( "context" "fmt" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapeProcesslist(t *testing.T) { _, err := kingpin.CommandLine.Parse([]string{ "--collect.info_schema.processlist.processes_by_user", "--collect.info_schema.processlist.processes_by_host", }) if err != nil { t.Fatal(err) } db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() query := fmt.Sprintf(infoSchemaProcesslistQuery, 0) columns := []string{"user", "host", "command", "state", "processes", "seconds"} rows := sqlmock.NewRows(columns). AddRow("manager", "10.0.7.234", "Sleep", "", 10, 87). AddRow("feedback", "10.0.7.154", "Sleep", "", 8, 842). AddRow("root", "10.0.7.253", "Sleep", "", 1, 20). AddRow("feedback", "10.0.7.179", "Sleep", "", 2, 14). AddRow("system user", "", "Connect", "waiting for handler commit", 1, 7271248). AddRow("manager", "10.0.7.234", "Sleep", "", 4, 62). AddRow("system user", "", "Query", "Slave has read all relay log; waiting for more updates", 1, 7271248). AddRow("event_scheduler", "localhost", "Daemon", "Waiting on empty queue", 1, 7271248) mock.ExpectQuery(sanitizeQuery(query)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeProcesslist{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"command": "connect", "state": "waiting_for_handler_commit"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"command": "connect", "state": "waiting_for_handler_commit"}, value: 7271248, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"command": "daemon", "state": "waiting_on_empty_queue"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"command": "daemon", "state": "waiting_on_empty_queue"}, value: 7271248, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"command": "query", "state": "slave_has_read_all_relay_log_waiting_for_more_updates"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"command": "query", "state": "slave_has_read_all_relay_log_waiting_for_more_updates"}, value: 7271248, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"command": "sleep", "state": "unknown"}, value: 25, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"command": "sleep", "state": "unknown"}, value: 1025, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"client_host": "10.0.7.154"}, value: 8, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"client_host": "10.0.7.179"}, value: 2, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"client_host": "10.0.7.234"}, value: 14, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"client_host": "10.0.7.253"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"client_host": "localhost"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"client_host": "unknown"}, value: 2, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"mysql_user": "event_scheduler"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"mysql_user": "feedback"}, value: 10, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"mysql_user": "manager"}, value: 14, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"mysql_user": "root"}, value: 
1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"mysql_user": "system user"}, value: 2, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } mysqld_exporter-0.15.0/collector/info_schema_query_response_time.go000066400000000000000000000111311444546573200260220ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `information_schema.query_response_time*` tables. package collector import ( "context" "database/sql" "strconv" "strings" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) const queryResponseCheckQuery = `SELECT @@query_response_time_stats` var ( // Use uppercase for table names, otherwise the read/write split will return the same results as total // due to an upstream bug. queryResponseTimeQueries = [3]string{ "SELECT TIME, COUNT, TOTAL FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME", "SELECT TIME, COUNT, TOTAL FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME_READ", "SELECT TIME, COUNT, TOTAL FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME_WRITE", } infoSchemaQueryResponseTimeCountDescs = [3]*prometheus.Desc{ prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "query_response_time_seconds"), "The number of all queries by duration they took to execute.", []string{}, nil, ), prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "read_query_response_time_seconds"), "The number of read queries by duration they took to execute.", []string{}, nil, ), prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "write_query_response_time_seconds"), "The number of write queries by duration they took to execute.", []string{}, nil, ), } ) func processQueryResponseTimeTable(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, query string, i int) error { queryDistributionRows, err := db.QueryContext(ctx, query) if err != nil { return err } defer queryDistributionRows.Close() var ( length string count uint64 total string histogramCnt uint64 histogramSum float64 countBuckets = map[float64]uint64{} ) for queryDistributionRows.Next() { err = queryDistributionRows.Scan( &length, &count, &total, ) if err != nil { return err } length, _ := strconv.ParseFloat(strings.TrimSpace(length), 64) total, _ := strconv.ParseFloat(strings.TrimSpace(total), 64) histogramCnt += count histogramSum += total // Special case for "TOO LONG" row where we take into account the count field which is the only available // and do not add it as a part of histogram or metric if length == 0 { continue } countBuckets[length] = histogramCnt } // Create histogram with query counts ch <- prometheus.MustNewConstHistogram( infoSchemaQueryResponseTimeCountDescs[i], histogramCnt,
histogramSum, countBuckets, ) return nil } // ScrapeQueryResponseTime collects from `information_schema.query_response_time`. type ScrapeQueryResponseTime struct{} // Name of the Scraper. Should be unique. func (ScrapeQueryResponseTime) Name() string { return "info_schema.query_response_time" } // Help describes the role of the Scraper. func (ScrapeQueryResponseTime) Help() string { return "Collect query response time distribution if query_response_time_stats is ON." } // Version of MySQL from which scraper is available. func (ScrapeQueryResponseTime) Version() float64 { return 5.5 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeQueryResponseTime) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { var queryStats uint8 err := db.QueryRowContext(ctx, queryResponseCheckQuery).Scan(&queryStats) if err != nil { level.Debug(logger).Log("msg", "Query response time distribution is not available.") return nil } if queryStats == 0 { level.Debug(logger).Log("msg", "MySQL variable is OFF.", "var", "query_response_time_stats") return nil } for i, query := range queryResponseTimeQueries { err := processQueryResponseTimeTable(ctx, db, ch, query, i) // The first query should not fail if query_response_time_stats is ON, // unlike the other two when the read/write tables exist only with Percona Server 5.6/5.7. if i == 0 && err != nil { return err } } return nil } // check interface var _ Scraper = ScrapeQueryResponseTime{} mysqld_exporter-0.15.0/collector/info_schema_query_response_time_test.go000066400000000000000000000054601444546573200270710ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapeQueryResponseTime(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() mock.ExpectQuery(queryResponseCheckQuery).WillReturnRows(sqlmock.NewRows([]string{""}).AddRow(1)) rows := sqlmock.NewRows([]string{"TIME", "COUNT", "TOTAL"}). AddRow(0.000001, 124, 0.000000). AddRow(0.000010, 179, 0.000797). AddRow(0.000100, 2859, 0.107321). AddRow(0.001000, 1085, 0.335395). AddRow(0.010000, 269, 0.522264). AddRow(0.100000, 11, 0.344209). AddRow(1.000000, 1, 0.267369). AddRow(10.000000, 0, 0.000000). AddRow(100.000000, 0, 0.000000). AddRow(1000.000000, 0, 0.000000). AddRow(10000.000000, 0, 0.000000). AddRow(100000.000000, 0, 0.000000). AddRow(1000000.000000, 0, 0.000000). 
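// The trailing "TOO LONG" row fails float parsing, so its TIME becomes the
// zero value: its COUNT still feeds histogramCnt, but it is never recorded
// as an explicit bucket (see processQueryResponseTimeTable above).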
AddRow("TOO LONG", 0, "TOO LONG") mock.ExpectQuery(sanitizeQuery(queryResponseTimeQueries[0])).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeQueryResponseTime{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() // Test histogram expectCounts := map[float64]uint64{ 1e-06: 124, 1e-05: 303, 0.0001: 3162, 0.001: 4247, 0.01: 4516, 0.1: 4527, 1: 4528, 10: 4528, 100: 4528, 1000: 4528, 10000: 4528, 100000: 4528, 1e+06: 4528, } expectHistogram := prometheus.MustNewConstHistogram(infoSchemaQueryResponseTimeCountDescs[0], 4528, 1.5773549999999998, expectCounts) expectPb := &dto.Metric{} expectHistogram.Write(expectPb) gotPb := &dto.Metric{} gotHistogram := <-ch // read the last item from channel gotHistogram.Write(gotPb) convey.Convey("Histogram comparison", t, func() { convey.So(expectPb.Histogram, convey.ShouldResemble, gotPb.Histogram) }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } mysqld_exporter-0.15.0/collector/info_schema_replica_host.go000066400000000000000000000111441444546573200244010ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `information_schema.replica_host_status`. package collector import ( "context" "database/sql" "github.com/go-kit/log" "github.com/go-kit/log/level" MySQL "github.com/go-sql-driver/mysql" "github.com/prometheus/client_golang/prometheus" ) const replicaHostQuery = ` SELECT SERVER_ID , if(SESSION_ID='MASTER_SESSION_ID','writer','reader') AS ROLE , CPU , MASTER_SLAVE_LATENCY_IN_MICROSECONDS , REPLICA_LAG_IN_MILLISECONDS , LOG_STREAM_SPEED_IN_KiB_PER_SECOND , CURRENT_REPLAY_LATENCY_IN_MICROSECONDS FROM information_schema.replica_host_status ` // Metric descriptors. 
var ( infoSchemaReplicaHostCpuDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "replica_host_cpu_percent"), "The CPU usage as a percentage.", []string{"server_id", "role"}, nil, ) infoSchemaReplicaHostReplicaLatencyDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "replica_host_replica_latency_seconds"), "The source-replica latency in seconds.", []string{"server_id", "role"}, nil, ) infoSchemaReplicaHostLagDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "replica_host_lag_seconds"), "The replica lag in seconds.", []string{"server_id", "role"}, nil, ) infoSchemaReplicaHostLogStreamSpeedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "replica_host_log_stream_speed"), "The log stream speed in kilobytes per second.", []string{"server_id", "role"}, nil, ) infoSchemaReplicaHostReplayLatencyDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "replica_host_replay_latency_seconds"), "The current replay latency in seconds.", []string{"server_id", "role"}, nil, ) ) // ScrapeReplicaHost collects from `information_schema.replica_host_status`. type ScrapeReplicaHost struct{} // Name of the Scraper. Should be unique. func (ScrapeReplicaHost) Name() string { return "info_schema.replica_host" } // Help describes the role of the Scraper. func (ScrapeReplicaHost) Help() string { return "Collect metrics from information_schema.replica_host_status" } // Version of MySQL from which scraper is available. func (ScrapeReplicaHost) Version() float64 { return 5.6 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeReplicaHost) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { replicaHostRows, err := db.QueryContext(ctx, replicaHostQuery) if err != nil { if mysqlErr, ok := err.(*MySQL.MySQLError); ok { // Now the error number is accessible directly // Check for error 1109: Unknown table if mysqlErr.Number == 1109 { level.Debug(logger).Log("msg", "information_schema.replica_host_status is not available.") return nil } } return err } defer replicaHostRows.Close() var ( serverId string role string cpu float64 replicaLatency uint64 replicaLag float64 logStreamSpeed float64 replayLatency uint64 ) for replicaHostRows.Next() { if err := replicaHostRows.Scan( &serverId, &role, &cpu, &replicaLatency, &replicaLag, &logStreamSpeed, &replayLatency, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( infoSchemaReplicaHostCpuDesc, prometheus.GaugeValue, cpu, serverId, role, ) ch <- prometheus.MustNewConstMetric( infoSchemaReplicaHostReplicaLatencyDesc, prometheus.GaugeValue, float64(replicaLatency)*0.000001, serverId, role, ) ch <- prometheus.MustNewConstMetric( infoSchemaReplicaHostLagDesc, prometheus.GaugeValue, replicaLag*0.001, serverId, role, ) ch <- prometheus.MustNewConstMetric( infoSchemaReplicaHostLogStreamSpeedDesc, prometheus.GaugeValue, logStreamSpeed, serverId, role, ) ch <- prometheus.MustNewConstMetric( infoSchemaReplicaHostReplayLatencyDesc, prometheus.GaugeValue, float64(replayLatency)*0.000001, serverId, role, ) } return nil } // check interface var _ Scraper = ScrapeReplicaHost{} mysqld_exporter-0.15.0/collector/info_schema_replica_host_test.go000066400000000000000000000066451444546573200254520ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use 
this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapeReplicaHost(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"SERVER_ID", "ROLE", "CPU", "MASTER_SLAVE_LATENCY_IN_MICROSECONDS", "REPLICA_LAG_IN_MILLISECONDS", "LOG_STREAM_SPEED_IN_KiB_PER_SECOND", "CURRENT_REPLAY_LATENCY_IN_MICROSECONDS"} rows := sqlmock.NewRows(columns). AddRow("dbtools-cluster-us-west-2c", "reader", 1.2531328201293945, 250000, 20.069000244140625, 2.0368164549078225, 500000). AddRow("dbtools-cluster-writer", "writer", 1.9607843160629272, 250000, 0, 2.0368164549078225, 0) mock.ExpectQuery(sanitizeQuery(replicaHostQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeReplicaHost{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"server_id": "dbtools-cluster-us-west-2c", "role": "reader"}, value: 1.2531328201293945, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"server_id": "dbtools-cluster-us-west-2c", "role": "reader"}, value: 0.25, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"server_id": "dbtools-cluster-us-west-2c", "role": "reader"}, value: 0.020069000244140625, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"server_id": "dbtools-cluster-us-west-2c", "role": "reader"}, value: 2.0368164549078225, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"server_id": "dbtools-cluster-us-west-2c", "role": "reader"}, value: 0.5, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"server_id": "dbtools-cluster-writer", "role": "writer"}, value: 1.9607843160629272, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"server_id": "dbtools-cluster-writer", "role": "writer"}, value: 0.25, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"server_id": "dbtools-cluster-writer", "role": "writer"}, value: 0.0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"server_id": "dbtools-cluster-writer", "role": "writer"}, value: 2.0368164549078225, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"server_id": "dbtools-cluster-writer", "role": "writer"}, value: 0.0, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } mysqld_exporter-0.15.0/collector/info_schema_schemastats.go000066400000000000000000000075161444546573200242520ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use
this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `information_schema.table_statistics`. package collector import ( "context" "database/sql" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) const schemaStatQuery = ` SELECT TABLE_SCHEMA, SUM(ROWS_READ) AS ROWS_READ, SUM(ROWS_CHANGED) AS ROWS_CHANGED, SUM(ROWS_CHANGED_X_INDEXES) AS ROWS_CHANGED_X_INDEXES FROM information_schema.TABLE_STATISTICS GROUP BY TABLE_SCHEMA; ` // Metric descriptors. var ( infoSchemaStatsRowsReadDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "schema_statistics_rows_read_total"), "The number of rows read from the schema.", []string{"schema"}, nil, ) infoSchemaStatsRowsChangedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "schema_statistics_rows_changed_total"), "The number of rows changed in the schema.", []string{"schema"}, nil, ) infoSchemaStatsRowsChangedXIndexesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "schema_statistics_rows_changed_x_indexes_total"), "The number of rows changed in the schema, multiplied by the number of indexes changed.", []string{"schema"}, nil, ) ) // ScrapeSchemaStat collects from `information_schema.table_statistics` grouped by schema. type ScrapeSchemaStat struct{} // Name of the Scraper. Should be unique. func (ScrapeSchemaStat) Name() string { return "info_schema.schemastats" } // Help describes the role of the Scraper. func (ScrapeSchemaStat) Help() string { return "If running with userstat=1, set to true to collect schema statistics" } // Version of MySQL from which scraper is available. func (ScrapeSchemaStat) Version() float64 { return 5.1 } // Scrape collects data from database connection and sends it over channel as prometheus metric. 
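//
// Scrape is a no-op (returning nil) when the userstat check query fails or
// the variable is OFF; otherwise it emits per-schema counters such as
// (illustrative, assuming the package-level namespace = "mysql"):
//
//   mysql_info_schema_schema_statistics_rows_read_total{schema="mysql"} 238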
func (ScrapeSchemaStat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { var varName, varVal string err := db.QueryRowContext(ctx, userstatCheckQuery).Scan(&varName, &varVal) if err != nil { level.Debug(logger).Log("msg", "Detailed schema stats are not available.") return nil } if varVal == "OFF" { level.Debug(logger).Log("msg", "MySQL variable is OFF.", "var", varName) return nil } informationSchemaTableStatisticsRows, err := db.QueryContext(ctx, schemaStatQuery) if err != nil { return err } defer informationSchemaTableStatisticsRows.Close() var ( tableSchema string rowsRead uint64 rowsChanged uint64 rowsChangedXIndexes uint64 ) for informationSchemaTableStatisticsRows.Next() { err = informationSchemaTableStatisticsRows.Scan( &tableSchema, &rowsRead, &rowsChanged, &rowsChangedXIndexes, ) if err != nil { return err } ch <- prometheus.MustNewConstMetric( infoSchemaStatsRowsReadDesc, prometheus.CounterValue, float64(rowsRead), tableSchema, ) ch <- prometheus.MustNewConstMetric( infoSchemaStatsRowsChangedDesc, prometheus.CounterValue, float64(rowsChanged), tableSchema, ) ch <- prometheus.MustNewConstMetric( infoSchemaStatsRowsChangedXIndexesDesc, prometheus.CounterValue, float64(rowsChangedXIndexes), tableSchema, ) } return nil } // check interface var _ Scraper = ScrapeSchemaStat{} mysqld_exporter-0.15.0/collector/info_schema_schemastats_test.go000066400000000000000000000043571444546573200253130ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/smartystreets/goconvey/convey" ) func TestScrapeSchemaStat(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() mock.ExpectQuery(sanitizeQuery(userstatCheckQuery)).WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}). AddRow("userstat", "ON")) columns := []string{"TABLE_SCHEMA", "ROWS_READ", "ROWS_CHANGED", "ROWS_CHANGED_X_INDEXES"} rows := sqlmock.NewRows(columns). AddRow("mysql", 238, 0, 8). 
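// The following row's zero ROWS_CHANGED_X_INDEXES value is still expected
// below as a 0-valued counter.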
AddRow("default", 99, 1, 0) mock.ExpectQuery(sanitizeQuery(schemaStatQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeSchemaStat{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"schema": "mysql"}, value: 238}, {labels: labelMap{"schema": "mysql"}, value: 0}, {labels: labelMap{"schema": "mysql"}, value: 8}, {labels: labelMap{"schema": "default"}, value: 99}, {labels: labelMap{"schema": "default"}, value: 1}, {labels: labelMap{"schema": "default"}, value: 0}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } mysqld_exporter-0.15.0/collector/info_schema_tables.go000066400000000000000000000122121444546573200231740ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `information_schema.tables`. package collector import ( "context" "database/sql" "fmt" "strings" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const ( tableSchemaQuery = ` SELECT TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, ifnull(ENGINE, 'NONE') as ENGINE, ifnull(VERSION, '0') as VERSION, ifnull(ROW_FORMAT, 'NONE') as ROW_FORMAT, ifnull(TABLE_ROWS, '0') as TABLE_ROWS, ifnull(DATA_LENGTH, '0') as DATA_LENGTH, ifnull(INDEX_LENGTH, '0') as INDEX_LENGTH, ifnull(DATA_FREE, '0') as DATA_FREE, ifnull(CREATE_OPTIONS, 'NONE') as CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_SCHEMA = '%s' ` dbListQuery = ` SELECT SCHEMA_NAME FROM information_schema.schemata WHERE SCHEMA_NAME NOT IN ('mysql', 'performance_schema', 'information_schema') ` ) // Tunable flags. var ( tableSchemaDatabases = kingpin.Flag( "collect.info_schema.tables.databases", "The list of databases to collect table stats for, or '*' for all", ).Default("*").String() ) // Metric descriptors. var ( infoSchemaTablesVersionDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_version"), "The version number of the table's .frm file", []string{"schema", "table", "type", "engine", "row_format", "create_options"}, nil, ) infoSchemaTablesRowsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_rows"), "The estimated number of rows in the table from information_schema.tables", []string{"schema", "table"}, nil, ) infoSchemaTablesSizeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_size"), "The size of the table components from information_schema.tables", []string{"schema", "table", "component"}, nil, ) ) // ScrapeTableSchema collects from `information_schema.tables`. 
type ScrapeTableSchema struct{} // Name of the Scraper. Should be unique. func (ScrapeTableSchema) Name() string { return informationSchema + ".tables" } // Help describes the role of the Scraper. func (ScrapeTableSchema) Help() string { return "Collect metrics from information_schema.tables" } // Version of MySQL from which scraper is available. func (ScrapeTableSchema) Version() float64 { return 5.1 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeTableSchema) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { var dbList []string if *tableSchemaDatabases == "*" { dbListRows, err := db.QueryContext(ctx, dbListQuery) if err != nil { return err } defer dbListRows.Close() var database string for dbListRows.Next() { if err := dbListRows.Scan( &database, ); err != nil { return err } dbList = append(dbList, database) } } else { dbList = strings.Split(*tableSchemaDatabases, ",") } for _, database := range dbList { tableSchemaRows, err := db.QueryContext(ctx, fmt.Sprintf(tableSchemaQuery, database)) if err != nil { return err } defer tableSchemaRows.Close() var ( tableSchema string tableName string tableType string engine string version uint64 rowFormat string tableRows uint64 dataLength uint64 indexLength uint64 dataFree uint64 createOptions string ) for tableSchemaRows.Next() { err = tableSchemaRows.Scan( &tableSchema, &tableName, &tableType, &engine, &version, &rowFormat, &tableRows, &dataLength, &indexLength, &dataFree, &createOptions, ) if err != nil { return err } ch <- prometheus.MustNewConstMetric( infoSchemaTablesVersionDesc, prometheus.GaugeValue, float64(version), tableSchema, tableName, tableType, engine, rowFormat, createOptions, ) ch <- prometheus.MustNewConstMetric( infoSchemaTablesRowsDesc, prometheus.GaugeValue, float64(tableRows), tableSchema, tableName, ) ch <- prometheus.MustNewConstMetric( infoSchemaTablesSizeDesc, prometheus.GaugeValue, float64(dataLength), tableSchema, tableName, "data_length", ) ch <- prometheus.MustNewConstMetric( infoSchemaTablesSizeDesc, prometheus.GaugeValue, float64(indexLength), tableSchema, tableName, "index_length", ) ch <- prometheus.MustNewConstMetric( infoSchemaTablesSizeDesc, prometheus.GaugeValue, float64(dataFree), tableSchema, tableName, "data_free", ) } } return nil } // check interface var _ Scraper = ScrapeTableSchema{} mysqld_exporter-0.15.0/collector/info_schema_tablestats.go000066400000000000000000000075371444546573200241060ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `information_schema.table_statistics`. package collector import ( "context" "database/sql" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) const tableStatQuery = ` SELECT TABLE_SCHEMA, TABLE_NAME, ROWS_READ, ROWS_CHANGED, ROWS_CHANGED_X_INDEXES FROM information_schema.table_statistics ` // Metric descriptors. 
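//
// Illustratively (assuming the package-level constants namespace = "mysql"
// and informationSchema = "info_schema"), a scrape with userstat enabled
// produces per-table counters such as:
//
//   mysql_info_schema_table_statistics_rows_read_total{schema="mysql",table="user"} 1064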
var ( infoSchemaTableStatsRowsReadDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_statistics_rows_read_total"), "The number of rows read from the table.", []string{"schema", "table"}, nil, ) infoSchemaTableStatsRowsChangedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_statistics_rows_changed_total"), "The number of rows changed in the table.", []string{"schema", "table"}, nil, ) infoSchemaTableStatsRowsChangedXIndexesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, informationSchema, "table_statistics_rows_changed_x_indexes_total"), "The number of rows changed in the table, multiplied by the number of indexes changed.", []string{"schema", "table"}, nil, ) ) // ScrapeTableStat collects from `information_schema.table_statistics`. type ScrapeTableStat struct{} // Name of the Scraper. Should be unique. func (ScrapeTableStat) Name() string { return "info_schema.tablestats" } // Help describes the role of the Scraper. func (ScrapeTableStat) Help() string { return "If running with userstat=1, set to true to collect table statistics" } // Version of MySQL from which scraper is available. func (ScrapeTableStat) Version() float64 { return 5.1 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeTableStat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { var varName, varVal string err := db.QueryRowContext(ctx, userstatCheckQuery).Scan(&varName, &varVal) if err != nil { level.Debug(logger).Log("msg", "Detailed table stats are not available.") return nil } if varVal == "OFF" { level.Debug(logger).Log("msg", "MySQL variable is OFF.", "var", varName) return nil } informationSchemaTableStatisticsRows, err := db.QueryContext(ctx, tableStatQuery) if err != nil { return err } defer informationSchemaTableStatisticsRows.Close() var ( tableSchema string tableName string rowsRead uint64 rowsChanged uint64 rowsChangedXIndexes uint64 ) for informationSchemaTableStatisticsRows.Next() { err = informationSchemaTableStatisticsRows.Scan( &tableSchema, &tableName, &rowsRead, &rowsChanged, &rowsChangedXIndexes, ) if err != nil { return err } ch <- prometheus.MustNewConstMetric( infoSchemaTableStatsRowsReadDesc, prometheus.CounterValue, float64(rowsRead), tableSchema, tableName, ) ch <- prometheus.MustNewConstMetric( infoSchemaTableStatsRowsChangedDesc, prometheus.CounterValue, float64(rowsChanged), tableSchema, tableName, ) ch <- prometheus.MustNewConstMetric( infoSchemaTableStatsRowsChangedXIndexesDesc, prometheus.CounterValue, float64(rowsChangedXIndexes), tableSchema, tableName, ) } return nil } // check interface var _ Scraper = ScrapeTableStat{} mysqld_exporter-0.15.0/collector/info_schema_tablestats_test.go000066400000000000000000000051661444546573200251410ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package collector import ( "context" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/smartystreets/goconvey/convey" ) func TestScrapeTableStat(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() mock.ExpectQuery(sanitizeQuery(userstatCheckQuery)).WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}). AddRow("userstat", "ON")) columns := []string{"TABLE_SCHEMA", "TABLE_NAME", "ROWS_READ", "ROWS_CHANGED", "ROWS_CHANGED_X_INDEXES"} rows := sqlmock.NewRows(columns). AddRow("mysql", "db", 238, 0, 8). AddRow("mysql", "proxies_priv", 99, 1, 0). AddRow("mysql", "user", 1064, 2, 5) mock.ExpectQuery(sanitizeQuery(tableStatQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeTableStat{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"schema": "mysql", "table": "db"}, value: 238}, {labels: labelMap{"schema": "mysql", "table": "db"}, value: 0}, {labels: labelMap{"schema": "mysql", "table": "db"}, value: 8}, {labels: labelMap{"schema": "mysql", "table": "proxies_priv"}, value: 99}, {labels: labelMap{"schema": "mysql", "table": "proxies_priv"}, value: 1}, {labels: labelMap{"schema": "mysql", "table": "proxies_priv"}, value: 0}, {labels: labelMap{"schema": "mysql", "table": "user"}, value: 1064}, {labels: labelMap{"schema": "mysql", "table": "user"}, value: 2}, {labels: labelMap{"schema": "mysql", "table": "user"}, value: 5}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range expected { got := readMetric(<-ch) convey.So(expect, convey.ShouldResemble, got) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } mysqld_exporter-0.15.0/collector/info_schema_userstats.go000066400000000000000000000252371444546573200237700ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `information_schema.user_statistics`. package collector import ( "context" "database/sql" "fmt" "strings" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) const userStatQuery = `SELECT * FROM information_schema.user_statistics` var ( // Map known user-statistics values to types. Unknown types will be mapped as // untyped.
informationSchemaUserStatisticsTypes = map[string]struct { vtype prometheus.ValueType desc *prometheus.Desc }{ "TOTAL_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_total_connections"), "The number of connections created for this user.", []string{"user"}, nil)}, "CONCURRENT_CONNECTIONS": {prometheus.GaugeValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_concurrent_connections"), "The number of concurrent connections for this user.", []string{"user"}, nil)}, "CONNECTED_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_connected_time_seconds_total"), "The cumulative number of seconds elapsed while there were connections from this user.", []string{"user"}, nil)}, "BUSY_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_busy_seconds_total"), "The cumulative number of seconds there was activity on connections from this user.", []string{"user"}, nil)}, "CPU_TIME": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_cpu_time_seconds_total"), "The cumulative CPU time elapsed, in seconds, while servicing this user's connections.", []string{"user"}, nil)}, "BYTES_RECEIVED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_bytes_received_total"), "The number of bytes received from this user’s connections.", []string{"user"}, nil)}, "BYTES_SENT": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_bytes_sent_total"), "The number of bytes sent to this user’s connections.", []string{"user"}, nil)}, "BINLOG_BYTES_WRITTEN": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_binlog_bytes_written_total"), "The number of bytes written to the binary log from this user’s connections.", []string{"user"}, nil)}, "ROWS_READ": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_read_total"), "The number of rows read by this user's connections.", []string{"user"}, nil)}, "ROWS_SENT": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_sent_total"), "The number of rows sent by this user's connections.", []string{"user"}, nil)}, "ROWS_DELETED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_deleted_total"), "The number of rows deleted by this user's connections.", []string{"user"}, nil)}, "ROWS_INSERTED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_inserted_total"), "The number of rows inserted by this user's connections.", []string{"user"}, nil)}, "ROWS_FETCHED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_fetched_total"), "The number of rows fetched by this user’s connections.", []string{"user"}, nil)}, "ROWS_UPDATED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rows_updated_total"), "The number of rows updated by this user’s connections.", []string{"user"}, nil)}, 
"TABLE_ROWS_READ": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_table_rows_read_total"), "The number of rows read from tables by this user’s connections. (It may be different from ROWS_FETCHED.)", []string{"user"}, nil)}, "SELECT_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_select_commands_total"), "The number of SELECT commands executed from this user’s connections.", []string{"user"}, nil)}, "UPDATE_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_update_commands_total"), "The number of UPDATE commands executed from this user’s connections.", []string{"user"}, nil)}, "OTHER_COMMANDS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_other_commands_total"), "The number of other commands executed from this user’s connections.", []string{"user"}, nil)}, "COMMIT_TRANSACTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_commit_transactions_total"), "The number of COMMIT commands issued by this user’s connections.", []string{"user"}, nil)}, "ROLLBACK_TRANSACTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_rollback_transactions_total"), "The number of ROLLBACK commands issued by this user’s connections.", []string{"user"}, nil)}, "DENIED_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_denied_connections_total"), "The number of connections denied to this user.", []string{"user"}, nil)}, "LOST_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_lost_connections_total"), "The number of this user’s connections that were terminated uncleanly.", []string{"user"}, nil)}, "ACCESS_DENIED": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_access_denied_total"), "The number of times this user’s connections issued commands that were denied.", []string{"user"}, nil)}, "EMPTY_QUERIES": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_empty_queries_total"), "The number of times this user’s connections sent empty queries to the server.", []string{"user"}, nil)}, "TOTAL_SSL_CONNECTIONS": {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "user_statistics_total_ssl_connections_total"), "The number of times this user’s connections connected using SSL to the server.", []string{"user"}, nil)}, } ) // ScrapeUserStat collects from `information_schema.user_statistics`. type ScrapeUserStat struct{} // Name of the Scraper. Should be unique. func (ScrapeUserStat) Name() string { return "info_schema.userstats" } // Help describes the role of the Scraper. func (ScrapeUserStat) Help() string { return "If running with userstat=1, set to true to collect user statistics" } // Version of MySQL from which scraper is available. func (ScrapeUserStat) Version() float64 { return 5.1 } // Scrape collects data from database connection and sends it over channel as prometheus metric. 
func (ScrapeUserStat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
	var varName, varVal string
	err := db.QueryRowContext(ctx, userstatCheckQuery).Scan(&varName, &varVal)
	if err != nil {
		level.Debug(logger).Log("msg", "Detailed user stats are not available.")
		return nil
	}
	if varVal == "OFF" {
		level.Debug(logger).Log("msg", "MySQL variable is OFF.", "var", varName)
		return nil
	}

	informationSchemaUserStatisticsRows, err := db.QueryContext(ctx, userStatQuery)
	if err != nil {
		return err
	}
	defer informationSchemaUserStatisticsRows.Close()

	// The user column is assumed to be column[0], while all other data is
	// assumed to be coercible to float64. Because of the user column,
	// userStatData[0] maps to columnNames[1] when reading off the metrics
	// (userStatScanArgs is laid out as [ &user, &userStatData[0], &userStatData[1] ... &userStatData[n] ]),
	// so to map metrics to names we always range over columnNames[1:].
	var columnNames []string
	columnNames, err = informationSchemaUserStatisticsRows.Columns()
	if err != nil {
		return err
	}

	var user string                                        // Holds the username, which should be in column 0.
	var userStatData = make([]float64, len(columnNames)-1) // 1 less because of the user column.
	var userStatScanArgs = make([]interface{}, len(columnNames))
	userStatScanArgs[0] = &user
	for i := range userStatData {
		userStatScanArgs[i+1] = &userStatData[i]
	}

	for informationSchemaUserStatisticsRows.Next() {
		err = informationSchemaUserStatisticsRows.Scan(userStatScanArgs...)
		if err != nil {
			return err
		}

		// Loop over column names and match them to the scanned data. Unknown
		// columns are reported as untyped metrics. Aside from the user column,
		// we assume every value is numeric.
		for idx, columnName := range columnNames[1:] {
			if metricType, ok := informationSchemaUserStatisticsTypes[columnName]; ok {
				ch <- prometheus.MustNewConstMetric(metricType.desc, metricType.vtype, userStatData[idx], user)
			} else {
				// Unknown metric. Report as untyped.
				desc := prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, fmt.Sprintf("user_statistics_%s", strings.ToLower(columnName))), fmt.Sprintf("Unsupported metric from column %s", columnName), []string{"user"}, nil)
				ch <- prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, userStatData[idx], user)
			}
		}
	}
	return nil
}

// check interface
var _ Scraper = ScrapeUserStat{}
mysqld_exporter-0.15.0/collector/info_schema_userstats_test.go000066400000000000000000000106371444546573200250270ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector import ( "context" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapeUserStat(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() mock.ExpectQuery(sanitizeQuery(userstatCheckQuery)).WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}). AddRow("userstat", "ON")) columns := []string{"USER", "TOTAL_CONNECTIONS", "CONCURRENT_CONNECTIONS", "CONNECTED_TIME", "BUSY_TIME", "CPU_TIME", "BYTES_RECEIVED", "BYTES_SENT", "BINLOG_BYTES_WRITTEN", "ROWS_READ", "ROWS_SENT", "ROWS_DELETED", "ROWS_INSERTED", "ROWS_UPDATED", "SELECT_COMMANDS", "UPDATE_COMMANDS", "OTHER_COMMANDS", "COMMIT_TRANSACTIONS", "ROLLBACK_TRANSACTIONS", "DENIED_CONNECTIONS", "LOST_CONNECTIONS", "ACCESS_DENIED", "EMPTY_QUERIES"} rows := sqlmock.NewRows(columns). AddRow("user_test", 1002, 0, 127027, 286, 245, float64(2565104853), 21090856, float64(2380108042), 767691, 1764, 8778, 1210741, 0, 1764, 1214416, 293, 2430888, 0, 0, 0, 0, 0) mock.ExpectQuery(sanitizeQuery(userStatQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeUserStat{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() expected := []MetricResult{ {labels: labelMap{"user": "user_test"}, value: 1002, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"user": "user_test"}, value: 127027, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 286, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 245, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: float64(2565104853), metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 21090856, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: float64(2380108042), metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 767691, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 1764, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 8778, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 1210741, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 1764, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 1214416, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 293, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 2430888, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"user": "user_test"}, value: 0, metricType: dto.MetricType_COUNTER}, } convey.Convey("Metrics comparison", t, func() { 
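		// readMetric and MetricResult are assumed to come from this package's
		// shared test helpers: readMetric drains one metric from the channel
		// and flattens it into comparable labels, value, and metric type.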
		for _, expect := range expected {
			got := readMetric(<-ch)
			convey.So(got, convey.ShouldResemble, expect)
		}
	})

	// Ensure all SQL queries were executed
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
mysqld_exporter-0.15.0/collector/mysql_user.go000066400000000000000000000147251444546573200216030ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Scrape `mysql.user`.

package collector

import (
	"context"
	"database/sql"
	"fmt"
	"strings"

	"github.com/alecthomas/kingpin/v2"
	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
)

// mysqlSubsystem used for metric names.
const mysqlSubsystem = "mysql"

const mysqlUserQuery = `
	SELECT
	    user,
	    host,
	    Select_priv,
	    Insert_priv,
	    Update_priv,
	    Delete_priv,
	    Create_priv,
	    Drop_priv,
	    Reload_priv,
	    Shutdown_priv,
	    Process_priv,
	    File_priv,
	    Grant_priv,
	    References_priv,
	    Index_priv,
	    Alter_priv,
	    Show_db_priv,
	    Super_priv,
	    Create_tmp_table_priv,
	    Lock_tables_priv,
	    Execute_priv,
	    Repl_slave_priv,
	    Repl_client_priv,
	    Create_view_priv,
	    Show_view_priv,
	    Create_routine_priv,
	    Alter_routine_priv,
	    Create_user_priv,
	    Event_priv,
	    Trigger_priv,
	    Create_tablespace_priv,
	    max_questions,
	    max_updates,
	    max_connections,
	    max_user_connections
	  FROM mysql.user
	`

// Tunable flags.
var (
	userPrivilegesFlag = kingpin.Flag(
		"collect.mysql.user.privileges",
		"Enable collecting user privileges from mysql.user",
	).Default("false").Bool()
)

var (
	labelNames = []string{"mysql_user", "hostmask"}
)

// Metric descriptors.
var (
	userMaxQuestionsDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, mysqlSubsystem, "max_questions"),
		"The number of max_questions by user.",
		labelNames, nil)
	userMaxUpdatesDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, mysqlSubsystem, "max_updates"),
		"The number of max_updates by user.",
		labelNames, nil)
	userMaxConnectionsDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, mysqlSubsystem, "max_connections"),
		"The number of max_connections by user.",
		labelNames, nil)
	userMaxUserConnectionsDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, mysqlSubsystem, "max_user_connections"),
		"The number of max_user_connections by user.",
		labelNames, nil)
)

// ScrapeUser collects from `mysql.user`.
type ScrapeUser struct{}

// Name of the Scraper. Should be unique.
func (ScrapeUser) Name() string {
	return mysqlSubsystem + ".user"
}

// Help describes the role of the Scraper.
func (ScrapeUser) Help() string {
	return "Collect data from mysql.user"
}

// Version of MySQL from which scraper is available.
func (ScrapeUser) Version() float64 {
	return 5.1
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
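// The quota gauges (max_questions, max_updates, max_connections,
// max_user_connections) are always exported; the per-privilege gauges are
// emitted only when --collect.mysql.user.privileges is set.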
func (ScrapeUser) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { var ( userRows *sql.Rows err error ) userQuery := fmt.Sprint(mysqlUserQuery) userRows, err = db.QueryContext(ctx, userQuery) if err != nil { return err } defer userRows.Close() var ( user string host string Select_priv string Insert_priv string Update_priv string Delete_priv string Create_priv string Drop_priv string Reload_priv string Shutdown_priv string Process_priv string File_priv string Grant_priv string References_priv string Index_priv string Alter_priv string Show_db_priv string Super_priv string Create_tmp_table_priv string Lock_tables_priv string Execute_priv string Repl_slave_priv string Repl_client_priv string Create_view_priv string Show_view_priv string Create_routine_priv string Alter_routine_priv string Create_user_priv string Event_priv string Trigger_priv string Create_tablespace_priv string max_questions uint32 max_updates uint32 max_connections uint32 max_user_connections uint32 ) for userRows.Next() { err = userRows.Scan( &user, &host, &Select_priv, &Insert_priv, &Update_priv, &Delete_priv, &Create_priv, &Drop_priv, &Reload_priv, &Shutdown_priv, &Process_priv, &File_priv, &Grant_priv, &References_priv, &Index_priv, &Alter_priv, &Show_db_priv, &Super_priv, &Create_tmp_table_priv, &Lock_tables_priv, &Execute_priv, &Repl_slave_priv, &Repl_client_priv, &Create_view_priv, &Show_view_priv, &Create_routine_priv, &Alter_routine_priv, &Create_user_priv, &Event_priv, &Trigger_priv, &Create_tablespace_priv, &max_questions, &max_updates, &max_connections, &max_user_connections, ) if err != nil { return err } if *userPrivilegesFlag { userCols, err := userRows.Columns() if err != nil { return err } scanArgs := make([]interface{}, len(userCols)) for i := range scanArgs { scanArgs[i] = &sql.RawBytes{} } if err := userRows.Scan(scanArgs...); err != nil { return err } for i, col := range userCols { if value, ok := parsePrivilege(*scanArgs[i].(*sql.RawBytes)); ok { // Silently skip unparsable values. ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, mysqlSubsystem, strings.ToLower(col)), col+" by user.", labelNames, nil, ), prometheus.GaugeValue, value, user, host, ) } } } ch <- prometheus.MustNewConstMetric(userMaxQuestionsDesc, prometheus.GaugeValue, float64(max_questions), user, host) ch <- prometheus.MustNewConstMetric(userMaxUpdatesDesc, prometheus.GaugeValue, float64(max_updates), user, host) ch <- prometheus.MustNewConstMetric(userMaxConnectionsDesc, prometheus.GaugeValue, float64(max_connections), user, host) ch <- prometheus.MustNewConstMetric(userMaxUserConnectionsDesc, prometheus.GaugeValue, float64(max_user_connections), user, host) } return nil } mysqld_exporter-0.15.0/collector/perf_schema.go000066400000000000000000000012321444546573200216430ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector // Subsystem. 
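// It is combined with the package-level namespace ("mysql") by
// prometheus.BuildFQName, so every collector in this group exports metrics
// named mysql_perf_schema_*.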
const performanceSchema = "perf_schema" mysqld_exporter-0.15.0/collector/perf_schema_events_statements.go000066400000000000000000000232151444546573200255030ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `performance_schema.events_statements_summary_by_digest`. package collector import ( "context" "database/sql" "fmt" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const perfEventsStatementsQuery = ` SELECT ifnull(SCHEMA_NAME, 'NONE') as SCHEMA_NAME, DIGEST, LEFT(DIGEST_TEXT, %d) as DIGEST_TEXT, COUNT_STAR, SUM_TIMER_WAIT, SUM_ERRORS, SUM_WARNINGS, SUM_ROWS_AFFECTED, SUM_ROWS_SENT, SUM_ROWS_EXAMINED, SUM_CREATED_TMP_DISK_TABLES, SUM_CREATED_TMP_TABLES, SUM_SORT_MERGE_PASSES, SUM_SORT_ROWS, SUM_NO_INDEX_USED FROM ( SELECT * FROM performance_schema.events_statements_summary_by_digest WHERE SCHEMA_NAME NOT IN ('mysql', 'performance_schema', 'information_schema') AND LAST_SEEN > DATE_SUB(NOW(), INTERVAL %d SECOND) ORDER BY LAST_SEEN DESC )Q GROUP BY Q.SCHEMA_NAME, Q.DIGEST, Q.DIGEST_TEXT, Q.COUNT_STAR, Q.SUM_TIMER_WAIT, Q.SUM_ERRORS, Q.SUM_WARNINGS, Q.SUM_ROWS_AFFECTED, Q.SUM_ROWS_SENT, Q.SUM_ROWS_EXAMINED, Q.SUM_CREATED_TMP_DISK_TABLES, Q.SUM_CREATED_TMP_TABLES, Q.SUM_SORT_MERGE_PASSES, Q.SUM_SORT_ROWS, Q.SUM_NO_INDEX_USED ORDER BY SUM_TIMER_WAIT DESC LIMIT %d ` // Tunable flags. var ( perfEventsStatementsLimit = kingpin.Flag( "collect.perf_schema.eventsstatements.limit", "Limit the number of events statements digests by response time", ).Default("250").Int() perfEventsStatementsTimeLimit = kingpin.Flag( "collect.perf_schema.eventsstatements.timelimit", "Limit how old the 'last_seen' events statements can be, in seconds", ).Default("86400").Int() perfEventsStatementsDigestTextLimit = kingpin.Flag( "collect.perf_schema.eventsstatements.digest_text_limit", "Maximum length of the normalized statement text", ).Default("120").Int() ) // Metric descriptors. 
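// All descriptors below share the schema/digest/digest_text label set, so each
// digest tracked by the server fans out into one series per statistic. The
// digest_text label is bounded by the LEFT(DIGEST_TEXT, %d) clause in the
// query above, which is driven by the digest_text_limit flag.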
var ( performanceSchemaEventsStatementsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_total"), "The total count of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_seconds_total"), "The total time of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsErrorsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_errors_total"), "The errors of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsWarningsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_warnings_total"), "The warnings of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsRowsAffectedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_rows_affected_total"), "The total rows affected of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsRowsSentDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_rows_sent_total"), "The total rows sent of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsRowsExaminedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_rows_examined_total"), "The total rows examined of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsTmpTablesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_tmp_tables_total"), "The total tmp tables of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsTmpDiskTablesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_tmp_disk_tables_total"), "The total tmp disk tables of events statements by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsSortMergePassesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sort_merge_passes_total"), "The total number of merge passes by the sort algorithm performed by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsSortRowsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sort_rows_total"), "The total number of sorted rows by digest.", []string{"schema", "digest", "digest_text"}, nil, ) performanceSchemaEventsStatementsNoIndexUsedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_no_index_used_total"), "The total number of statements that used full table scans by digest.", []string{"schema", "digest", "digest_text"}, nil, ) ) // ScrapePerfEventsStatements collects from `performance_schema.events_statements_summary_by_digest`. type ScrapePerfEventsStatements struct{} // Name of the Scraper. Should be unique. 
func (ScrapePerfEventsStatements) Name() string { return "perf_schema.eventsstatements" } // Help describes the role of the Scraper. func (ScrapePerfEventsStatements) Help() string { return "Collect metrics from performance_schema.events_statements_summary_by_digest" } // Version of MySQL from which scraper is available. func (ScrapePerfEventsStatements) Version() float64 { return 5.6 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapePerfEventsStatements) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { perfQuery := fmt.Sprintf( perfEventsStatementsQuery, *perfEventsStatementsDigestTextLimit, *perfEventsStatementsTimeLimit, *perfEventsStatementsLimit, ) // Timers here are returned in picoseconds. perfSchemaEventsStatementsRows, err := db.QueryContext(ctx, perfQuery) if err != nil { return err } defer perfSchemaEventsStatementsRows.Close() var ( schemaName, digest, digestText string count, queryTime, errors, warnings uint64 rowsAffected, rowsSent, rowsExamined uint64 tmpTables, tmpDiskTables uint64 sortMergePasses, sortRows uint64 noIndexUsed uint64 ) for perfSchemaEventsStatementsRows.Next() { if err := perfSchemaEventsStatementsRows.Scan( &schemaName, &digest, &digestText, &count, &queryTime, &errors, &warnings, &rowsAffected, &rowsSent, &rowsExamined, &tmpTables, &tmpDiskTables, &sortMergePasses, &sortRows, &noIndexUsed, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsDesc, prometheus.CounterValue, float64(count), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsTimeDesc, prometheus.CounterValue, float64(queryTime)/picoSeconds, schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsErrorsDesc, prometheus.CounterValue, float64(errors), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsWarningsDesc, prometheus.CounterValue, float64(warnings), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsRowsAffectedDesc, prometheus.CounterValue, float64(rowsAffected), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsRowsSentDesc, prometheus.CounterValue, float64(rowsSent), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsRowsExaminedDesc, prometheus.CounterValue, float64(rowsExamined), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsTmpTablesDesc, prometheus.CounterValue, float64(tmpTables), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsTmpDiskTablesDesc, prometheus.CounterValue, float64(tmpDiskTables), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSortMergePassesDesc, prometheus.CounterValue, float64(sortMergePasses), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSortRowsDesc, prometheus.CounterValue, float64(sortRows), schemaName, digest, digestText, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsNoIndexUsedDesc, prometheus.CounterValue, float64(noIndexUsed), schemaName, digest, digestText, ) } return nil } // check interface var _ Scraper = ScrapePerfEventsStatements{} 
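// The blank-identifier assignment above is a compile-time assertion: the build
// breaks if ScrapePerfEventsStatements ever stops satisfying the Scraper
// interface, at no runtime cost.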
mysqld_exporter-0.15.0/collector/perf_schema_events_statements_sum.go000066400000000000000000000275771444546573200264060ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `performance_schema.events_statements_summary_by_digest`. package collector import ( "context" "database/sql" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const perfEventsStatementsSumQuery = ` SELECT SUM(COUNT_STAR) AS SUM_COUNT_STAR, SUM(SUM_CREATED_TMP_DISK_TABLES) AS SUM_SUM_CREATED_TMP_DISK_TABLES, SUM(SUM_CREATED_TMP_TABLES) AS SUM_SUM_CREATED_TMP_TABLES, SUM(SUM_ERRORS) AS SUM_SUM_ERRORS, SUM(SUM_LOCK_TIME) AS SUM_SUM_LOCK_TIME, SUM(SUM_NO_GOOD_INDEX_USED) AS SUM_SUM_NO_GOOD_INDEX_USED, SUM(SUM_NO_INDEX_USED) AS SUM_SUM_NO_INDEX_USED, SUM(SUM_ROWS_AFFECTED) AS SUM_SUM_ROWS_AFFECTED, SUM(SUM_ROWS_EXAMINED) AS SUM_SUM_ROWS_EXAMINED, SUM(SUM_ROWS_SENT) AS SUM_SUM_ROWS_SENT, SUM(SUM_SELECT_FULL_JOIN) AS SUM_SUM_SELECT_FULL_JOIN, SUM(SUM_SELECT_FULL_RANGE_JOIN) AS SUM_SUM_SELECT_FULL_RANGE_JOIN, SUM(SUM_SELECT_RANGE) AS SUM_SUM_SELECT_RANGE, SUM(SUM_SELECT_RANGE_CHECK) AS SUM_SUM_SELECT_RANGE_CHECK, SUM(SUM_SELECT_SCAN) AS SUM_SUM_SELECT_SCAN, SUM(SUM_SORT_MERGE_PASSES) AS SUM_SUM_SORT_MERGE_PASSES, SUM(SUM_SORT_RANGE) AS SUM_SUM_SORT_RANGE, SUM(SUM_SORT_ROWS) AS SUM_SUM_SORT_ROWS, SUM(SUM_SORT_SCAN) AS SUM_SUM_SORT_SCAN, SUM(SUM_TIMER_WAIT) AS SUM_SUM_TIMER_WAIT, SUM(SUM_WARNINGS) AS SUM_SUM_WARNINGS FROM performance_schema.events_statements_summary_by_digest; ` // Metric descriptors. 
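// Unlike the per-digest collector above, these grand-total descriptors carry
// no labels (nil label names): the query already aggregates with SUM() across
// all digests, so each scrape yields exactly one sample per statistic.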
var ( performanceSchemaEventsStatementsSumTotalDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_total"), "The total count of events statements.", nil, nil, ) performanceSchemaEventsStatementsSumCreatedTmpDiskTablesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_created_tmp_disk_tables"), "The number of on-disk temporary tables created.", nil, nil, ) performanceSchemaEventsStatementsSumCreatedTmpTablesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_created_tmp_tables"), "The number of temporary tables created.", nil, nil, ) performanceSchemaEventsStatementsSumErrorsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_errors"), "Number of errors.", nil, nil, ) performanceSchemaEventsStatementsSumLockTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_lock_time"), "Time in picoseconds spent waiting for locks.", nil, nil, ) performanceSchemaEventsStatementsSumNoGoodIndexUsedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_no_good_index_used"), "Number of times no good index was found.", nil, nil, ) performanceSchemaEventsStatementsSumNoIndexUsedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_no_index_used"), "Number of times no index was found.", nil, nil, ) performanceSchemaEventsStatementsSumRowsAffectedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_rows_affected"), "Number of rows affected by statements.", nil, nil, ) performanceSchemaEventsStatementsSumRowsExaminedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_rows_examined"), "Number of rows read during statements' execution.", nil, nil, ) performanceSchemaEventsStatementsSumRowsSentDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_rows_sent"), "Number of rows returned.", nil, nil, ) performanceSchemaEventsStatementsSumSelectFullJoinDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_select_full_join"), "Number of joins performed by statements which did not use an index.", nil, nil, ) performanceSchemaEventsStatementsSumSelectFullRangeJoinDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_select_full_range_join"), "Number of joins performed by statements which used a range search of the first table.", nil, nil, ) performanceSchemaEventsStatementsSumSelectRangeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_select_range"), "Number of joins performed by statements which used a range of the first table.", nil, nil, ) performanceSchemaEventsStatementsSumSelectRangeCheckDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_select_range_check"), "Number of joins without keys performed by statements that check for key usage after each row.", nil, nil, ) performanceSchemaEventsStatementsSumSelectScanDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_select_scan"), "Number of joins performed by statements which used a full scan of the first table.", nil, nil, ) 
performanceSchemaEventsStatementsSumSortMergePassesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_sort_merge_passes"), "Number of merge passes by the sort algorithm performed by statements.", nil, nil, ) performanceSchemaEventsStatementsSumSortRangeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_sort_range"), "Number of sorts performed by statements which used a range.", nil, nil, ) performanceSchemaEventsStatementsSumSortRowsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_sort_rows"), "Number of rows sorted.", nil, nil, ) performanceSchemaEventsStatementsSumSortScanDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_sort_scan"), "Number of sorts performed by statements which used a full table scan.", nil, nil, ) performanceSchemaEventsStatementsSumTimerWaitDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_timer_wait"), "Total wait time of the summarized events that are timed.", nil, nil, ) performanceSchemaEventsStatementsSumWarningsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_statements_sum_warnings"), "Number of warnings.", nil, nil, ) ) // ScrapePerfEventsStatementsSum collects from `performance_schema.events_statements_summary_by_digest`. type ScrapePerfEventsStatementsSum struct{} // Name of the Scraper. Should be unique. func (ScrapePerfEventsStatementsSum) Name() string { return "perf_schema.eventsstatementssum" } // Help describes the role of the Scraper. func (ScrapePerfEventsStatementsSum) Help() string { return "Collect metrics of grand sums from performance_schema.events_statements_summary_by_digest" } // Version of MySQL from which scraper is available. func (ScrapePerfEventsStatementsSum) Version() float64 { return 5.7 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapePerfEventsStatementsSum) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { // Timers here are returned in picoseconds. 
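	// Only SUM_TIMER_WAIT is rescaled on the way out (float64(timerWait) /
	// picoSeconds, where picoSeconds is assumed to be the package-level 1e12
	// constant), so the exported wait time is in seconds. SUM_LOCK_TIME is
	// deliberately left in picoseconds, as its descriptor's help text states.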
perfEventsStatementsSumRows, err := db.QueryContext(ctx, perfEventsStatementsSumQuery) if err != nil { return err } defer perfEventsStatementsSumRows.Close() var ( total, createdTmpDiskTables, createdTmpTables, errors uint64 lockTime, noGoodIndexUsed, noIndexUsed, rowsAffected uint64 rowsExamined, rowsSent, selectFullJoin uint64 selectFullRangeJoin, selectRange, selectRangeCheck uint64 selectScan, sortMergePasses, sortRange, sortRows uint64 sortScan, timerWait, warnings uint64 ) for perfEventsStatementsSumRows.Next() { if err := perfEventsStatementsSumRows.Scan( &total, &createdTmpDiskTables, &createdTmpTables, &errors, &lockTime, &noGoodIndexUsed, &noIndexUsed, &rowsAffected, &rowsExamined, &rowsSent, &selectFullJoin, &selectFullRangeJoin, &selectRange, &selectRangeCheck, &selectScan, &sortMergePasses, &sortRange, &sortRows, &sortScan, &timerWait, &warnings, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumTotalDesc, prometheus.CounterValue, float64(total), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumCreatedTmpDiskTablesDesc, prometheus.CounterValue, float64(createdTmpDiskTables), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumCreatedTmpTablesDesc, prometheus.CounterValue, float64(createdTmpTables), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumErrorsDesc, prometheus.CounterValue, float64(errors), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumLockTimeDesc, prometheus.CounterValue, float64(lockTime), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumNoGoodIndexUsedDesc, prometheus.CounterValue, float64(noGoodIndexUsed), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumNoIndexUsedDesc, prometheus.CounterValue, float64(noIndexUsed), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumRowsAffectedDesc, prometheus.CounterValue, float64(rowsAffected), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumRowsExaminedDesc, prometheus.CounterValue, float64(rowsExamined), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumRowsSentDesc, prometheus.CounterValue, float64(rowsSent), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumSelectFullJoinDesc, prometheus.CounterValue, float64(selectFullJoin), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumSelectFullRangeJoinDesc, prometheus.CounterValue, float64(selectFullRangeJoin), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumSelectRangeDesc, prometheus.CounterValue, float64(selectRange), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumSelectRangeCheckDesc, prometheus.CounterValue, float64(selectRangeCheck), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumSelectScanDesc, prometheus.CounterValue, float64(selectScan), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumSortMergePassesDesc, prometheus.CounterValue, float64(sortMergePasses), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumSortRangeDesc, prometheus.CounterValue, float64(sortRange), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumSortRowsDesc, prometheus.CounterValue, float64(sortRows), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumSortScanDesc, prometheus.CounterValue, 
float64(sortScan), ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumTimerWaitDesc, prometheus.CounterValue, float64(timerWait)/picoSeconds, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsStatementsSumWarningsDesc, prometheus.CounterValue, float64(warnings), ) } return nil } // check interface var _ Scraper = ScrapePerfEventsStatementsSum{} mysqld_exporter-0.15.0/collector/perf_schema_events_waits.go000066400000000000000000000056431444546573200244500ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `performance_schema.events_waits_summary_global_by_event_name`. package collector import ( "context" "database/sql" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const perfEventsWaitsQuery = ` SELECT EVENT_NAME, COUNT_STAR, SUM_TIMER_WAIT FROM performance_schema.events_waits_summary_global_by_event_name ` // Metric descriptors. var ( performanceSchemaEventsWaitsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_waits_total"), "The total events waits by event name.", []string{"event_name"}, nil, ) performanceSchemaEventsWaitsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "events_waits_seconds_total"), "The total seconds of events waits by event name.", []string{"event_name"}, nil, ) ) // ScrapePerfEventsWaits collects from `performance_schema.events_waits_summary_global_by_event_name`. type ScrapePerfEventsWaits struct{} // Name of the Scraper. Should be unique. func (ScrapePerfEventsWaits) Name() string { return "perf_schema.eventswaits" } // Help describes the role of the Scraper. func (ScrapePerfEventsWaits) Help() string { return "Collect metrics from performance_schema.events_waits_summary_global_by_event_name" } // Version of MySQL from which scraper is available. func (ScrapePerfEventsWaits) Version() float64 { return 5.5 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapePerfEventsWaits) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { // Timers here are returned in picoseconds. 
perfSchemaEventsWaitsRows, err := db.QueryContext(ctx, perfEventsWaitsQuery) if err != nil { return err } defer perfSchemaEventsWaitsRows.Close() var ( eventName string count, time uint64 ) for perfSchemaEventsWaitsRows.Next() { if err := perfSchemaEventsWaitsRows.Scan( &eventName, &count, &time, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaEventsWaitsDesc, prometheus.CounterValue, float64(count), eventName, ) ch <- prometheus.MustNewConstMetric( performanceSchemaEventsWaitsTimeDesc, prometheus.CounterValue, float64(time)/picoSeconds, eventName, ) } return nil } // check interface var _ Scraper = ScrapePerfEventsWaits{} mysqld_exporter-0.15.0/collector/perf_schema_file_events.go000066400000000000000000000105551444546573200242360ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `performance_schema.file_summary_by_event_name`. package collector import ( "context" "database/sql" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const perfFileEventsQuery = ` SELECT EVENT_NAME, COUNT_READ, SUM_TIMER_READ, SUM_NUMBER_OF_BYTES_READ, COUNT_WRITE, SUM_TIMER_WRITE, SUM_NUMBER_OF_BYTES_WRITE, COUNT_MISC, SUM_TIMER_MISC FROM performance_schema.file_summary_by_event_name ` // Metric descriptors. var ( performanceSchemaFileEventsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "file_events_total"), "The total file events by event name/mode.", []string{"event_name", "mode"}, nil, ) performanceSchemaFileEventsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "file_events_seconds_total"), "The total seconds of file events by event name/mode.", []string{"event_name", "mode"}, nil, ) performanceSchemaFileEventsBytesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "file_events_bytes_total"), "The total bytes of file events by event name/mode.", []string{"event_name", "mode"}, nil, ) ) // ScrapePerfFileEvents collects from `performance_schema.file_summary_by_event_name`. type ScrapePerfFileEvents struct{} // Name of the Scraper. Should be unique. func (ScrapePerfFileEvents) Name() string { return "perf_schema.file_events" } // Help describes the role of the Scraper. func (ScrapePerfFileEvents) Help() string { return "Collect metrics from performance_schema.file_summary_by_event_name" } // Version of MySQL from which scraper is available. func (ScrapePerfFileEvents) Version() float64 { return 5.6 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapePerfFileEvents) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { // Timers here are returned in picoseconds. 
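	// Each row of file_summary_by_event_name fans out into read, write, and
	// misc series on the shared descriptors, with the mode label keeping the
	// three I/O directions apart for a single event name.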
perfSchemaFileEventsRows, err := db.QueryContext(ctx, perfFileEventsQuery) if err != nil { return err } defer perfSchemaFileEventsRows.Close() var ( eventName string countRead, timeRead, bytesRead uint64 countWrite, timeWrite, bytesWrite uint64 countMisc, timeMisc uint64 ) for perfSchemaFileEventsRows.Next() { if err := perfSchemaFileEventsRows.Scan( &eventName, &countRead, &timeRead, &bytesRead, &countWrite, &timeWrite, &bytesWrite, &countMisc, &timeMisc, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsDesc, prometheus.CounterValue, float64(countRead), eventName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsTimeDesc, prometheus.CounterValue, float64(timeRead)/picoSeconds, eventName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsBytesDesc, prometheus.CounterValue, float64(bytesRead), eventName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsDesc, prometheus.CounterValue, float64(countWrite), eventName, "write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsTimeDesc, prometheus.CounterValue, float64(timeWrite)/picoSeconds, eventName, "write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsBytesDesc, prometheus.CounterValue, float64(bytesWrite), eventName, "write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsDesc, prometheus.CounterValue, float64(countMisc), eventName, "misc", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileEventsTimeDesc, prometheus.CounterValue, float64(timeMisc)/picoSeconds, eventName, "misc", ) } return nil } // check interface var _ Scraper = ScrapePerfFileEvents{} mysqld_exporter-0.15.0/collector/perf_schema_file_instances.go000066400000000000000000000102511444546573200247120ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `performance_schema.file_summary_by_instance`. package collector import ( "context" "database/sql" "strings" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const perfFileInstancesQuery = ` SELECT FILE_NAME, EVENT_NAME, COUNT_READ, COUNT_WRITE, SUM_NUMBER_OF_BYTES_READ, SUM_NUMBER_OF_BYTES_WRITE FROM performance_schema.file_summary_by_instance where FILE_NAME REGEXP ? ` // Tunable flags. var ( performanceSchemaFileInstancesFilter = kingpin.Flag( "collect.perf_schema.file_instances.filter", "RegEx file_name filter for performance_schema.file_summary_by_instance", ).Default(".*").String() performanceSchemaFileInstancesRemovePrefix = kingpin.Flag( "collect.perf_schema.file_instances.remove_prefix", "Remove path prefix in performance_schema.file_summary_by_instance", ).Default("/var/lib/mysql/").String() ) // Metric descriptors. 
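// file_name label values are FILE_NAME with the remove_prefix flag value
// trimmed off; combined with event_name and a read/write mode label this keeps
// series distinct per file, while the REGEXP filter in the query bounds how
// many files are exported at all.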
var ( performanceSchemaFileInstancesBytesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "file_instances_bytes"), "The number of bytes processed by file read/write operations.", []string{"file_name", "event_name", "mode"}, nil, ) performanceSchemaFileInstancesCountDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "file_instances_total"), "The total number of file read/write operations.", []string{"file_name", "event_name", "mode"}, nil, ) ) // ScrapePerfFileInstances collects from `performance_schema.file_summary_by_instance`. type ScrapePerfFileInstances struct{} // Name of the Scraper. Should be unique. func (ScrapePerfFileInstances) Name() string { return "perf_schema.file_instances" } // Help describes the role of the Scraper. func (ScrapePerfFileInstances) Help() string { return "Collect metrics from performance_schema.file_summary_by_instance" } // Version of MySQL from which scraper is available. func (ScrapePerfFileInstances) Version() float64 { return 5.5 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapePerfFileInstances) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { // Timers here are returned in picoseconds. perfSchemaFileInstancesRows, err := db.QueryContext(ctx, perfFileInstancesQuery, *performanceSchemaFileInstancesFilter) if err != nil { return err } defer perfSchemaFileInstancesRows.Close() var ( fileName, eventName string countRead, countWrite uint64 sumBytesRead, sumBytesWritten uint64 ) for perfSchemaFileInstancesRows.Next() { if err := perfSchemaFileInstancesRows.Scan( &fileName, &eventName, &countRead, &countWrite, &sumBytesRead, &sumBytesWritten, ); err != nil { return err } fileName = strings.TrimPrefix(fileName, *performanceSchemaFileInstancesRemovePrefix) ch <- prometheus.MustNewConstMetric( performanceSchemaFileInstancesCountDesc, prometheus.CounterValue, float64(countRead), fileName, eventName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileInstancesCountDesc, prometheus.CounterValue, float64(countWrite), fileName, eventName, "write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileInstancesBytesDesc, prometheus.CounterValue, float64(sumBytesRead), fileName, eventName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaFileInstancesBytesDesc, prometheus.CounterValue, float64(sumBytesWritten), fileName, eventName, "write", ) } return nil } // check interface var _ Scraper = ScrapePerfFileInstances{} mysqld_exporter-0.15.0/collector/perf_schema_file_instances_test.go000066400000000000000000000074651444546573200257660ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package collector

import (
	"context"
	"fmt"
	"testing"

	"github.com/DATA-DOG/go-sqlmock"
	"github.com/alecthomas/kingpin/v2"
	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"github.com/smartystreets/goconvey/convey"
)

func TestScrapePerfFileInstances(t *testing.T) {
	_, err := kingpin.CommandLine.Parse([]string{"--collect.perf_schema.file_instances.filter", ""})
	if err != nil {
		t.Fatal(err)
	}

	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("error opening a stub database connection: %s", err)
	}
	defer db.Close()

	columns := []string{"FILE_NAME", "EVENT_NAME", "COUNT_READ", "COUNT_WRITE", "SUM_NUMBER_OF_BYTES_READ", "SUM_NUMBER_OF_BYTES_WRITE"}

	rows := sqlmock.NewRows(columns).
		AddRow("/var/lib/mysql/db1/file", "event1", "3", "4", "725", "128").
		AddRow("/var/lib/mysql/db2/file", "event2", "23", "12", "3123", "967").
		AddRow("db3/file", "event3", "45", "32", "1337", "326")
	mock.ExpectQuery(sanitizeQuery(perfFileInstancesQuery)).WillReturnRows(rows)

	ch := make(chan prometheus.Metric)
	go func() {
		if err = (ScrapePerfFileInstances{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil {
			panic(fmt.Sprintf("error calling function on test: %s", err))
		}
		close(ch)
	}()

	metricExpected := []MetricResult{
		{labels: labelMap{"file_name": "db1/file", "event_name": "event1", "mode": "read"}, value: 3, metricType: dto.MetricType_COUNTER},
		{labels: labelMap{"file_name": "db1/file", "event_name": "event1", "mode": "write"}, value: 4, metricType: dto.MetricType_COUNTER},
		{labels: labelMap{"file_name": "db1/file", "event_name": "event1", "mode": "read"}, value: 725, metricType: dto.MetricType_COUNTER},
		{labels: labelMap{"file_name": "db1/file", "event_name": "event1", "mode": "write"}, value: 128, metricType: dto.MetricType_COUNTER},
		{labels: labelMap{"file_name": "db2/file", "event_name": "event2", "mode": "read"}, value: 23, metricType: dto.MetricType_COUNTER},
		{labels: labelMap{"file_name": "db2/file", "event_name": "event2", "mode": "write"}, value: 12, metricType: dto.MetricType_COUNTER},
		{labels: labelMap{"file_name": "db2/file", "event_name": "event2", "mode": "read"}, value: 3123, metricType: dto.MetricType_COUNTER},
		{labels: labelMap{"file_name": "db2/file", "event_name": "event2", "mode": "write"}, value: 967, metricType: dto.MetricType_COUNTER},
		{labels: labelMap{"file_name": "db3/file", "event_name": "event3", "mode": "read"}, value: 45, metricType: dto.MetricType_COUNTER},
		{labels: labelMap{"file_name": "db3/file", "event_name": "event3", "mode": "write"}, value: 32, metricType: dto.MetricType_COUNTER},
		{labels: labelMap{"file_name": "db3/file", "event_name": "event3", "mode": "read"}, value: 1337, metricType: dto.MetricType_COUNTER},
		{labels: labelMap{"file_name": "db3/file", "event_name": "event3", "mode": "write"}, value: 326, metricType: dto.MetricType_COUNTER},
	}
	convey.Convey("Metrics comparison", t, func() {
		for _, expect := range metricExpected {
			got := readMetric(<-ch)
			convey.So(got, convey.ShouldResemble, expect)
		}
	})

	// Ensure all SQL queries were executed
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
mysqld_exporter-0.15.0/collector/perf_schema_index_io_waits.go000066400000000000000000000114321444546573200247330ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `performance_schema.table_io_waits_summary_by_index_usage`. package collector import ( "context" "database/sql" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const perfIndexIOWaitsQuery = ` SELECT OBJECT_SCHEMA, OBJECT_NAME, ifnull(INDEX_NAME, 'NONE') as INDEX_NAME, COUNT_FETCH, COUNT_INSERT, COUNT_UPDATE, COUNT_DELETE, SUM_TIMER_FETCH, SUM_TIMER_INSERT, SUM_TIMER_UPDATE, SUM_TIMER_DELETE FROM performance_schema.table_io_waits_summary_by_index_usage WHERE OBJECT_SCHEMA NOT IN ('mysql', 'performance_schema') ` // Metric descriptors. var ( performanceSchemaIndexWaitsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "index_io_waits_total"), "The total number of index I/O wait events for each index and operation.", []string{"schema", "name", "index", "operation"}, nil, ) performanceSchemaIndexWaitsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "index_io_waits_seconds_total"), "The total time of index I/O wait events for each index and operation.", []string{"schema", "name", "index", "operation"}, nil, ) ) // ScrapePerfIndexIOWaits collects for `performance_schema.table_io_waits_summary_by_index_usage`. type ScrapePerfIndexIOWaits struct{} // Name of the Scraper. Should be unique. func (ScrapePerfIndexIOWaits) Name() string { return "perf_schema.indexiowaits" } // Help describes the role of the Scraper. func (ScrapePerfIndexIOWaits) Help() string { return "Collect metrics from performance_schema.table_io_waits_summary_by_index_usage" } // Version of MySQL from which scraper is available. func (ScrapePerfIndexIOWaits) Version() float64 { return 5.6 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapePerfIndexIOWaits) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { perfSchemaIndexWaitsRows, err := db.QueryContext(ctx, perfIndexIOWaitsQuery) if err != nil { return err } defer perfSchemaIndexWaitsRows.Close() var ( objectSchema, objectName, indexName string countFetch, countInsert, countUpdate, countDelete uint64 timeFetch, timeInsert, timeUpdate, timeDelete uint64 ) for perfSchemaIndexWaitsRows.Next() { if err := perfSchemaIndexWaitsRows.Scan( &objectSchema, &objectName, &indexName, &countFetch, &countInsert, &countUpdate, &countDelete, &timeFetch, &timeInsert, &timeUpdate, &timeDelete, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsDesc, prometheus.CounterValue, float64(countFetch), objectSchema, objectName, indexName, "fetch", ) // We only include the insert column when indexName is NONE. 
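		// (The server is assumed to attribute insert I/O to the
		// INDEX_NAME=NULL row rather than to individual indexes, so only the
		// NONE row carries a meaningful insert count.)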
if indexName == "NONE" { ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsDesc, prometheus.CounterValue, float64(countInsert), objectSchema, objectName, indexName, "insert", ) } ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsDesc, prometheus.CounterValue, float64(countUpdate), objectSchema, objectName, indexName, "update", ) ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsDesc, prometheus.CounterValue, float64(countDelete), objectSchema, objectName, indexName, "delete", ) ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsTimeDesc, prometheus.CounterValue, float64(timeFetch)/picoSeconds, objectSchema, objectName, indexName, "fetch", ) // We only update write columns when indexName is NONE. if indexName == "NONE" { ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsTimeDesc, prometheus.CounterValue, float64(timeInsert)/picoSeconds, objectSchema, objectName, indexName, "insert", ) } ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsTimeDesc, prometheus.CounterValue, float64(timeUpdate)/picoSeconds, objectSchema, objectName, indexName, "update", ) ch <- prometheus.MustNewConstMetric( performanceSchemaIndexWaitsTimeDesc, prometheus.CounterValue, float64(timeDelete)/picoSeconds, objectSchema, objectName, indexName, "delete", ) } return nil } // check interface var _ Scraper = ScrapePerfIndexIOWaits{} mysqld_exporter-0.15.0/collector/perf_schema_index_io_waits_test.go000066400000000000000000000103601444546573200257710ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapePerfIndexIOWaits(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"OBJECT_SCHEMA", "OBJECT_NAME", "INDEX_NAME", "COUNT_FETCH", "COUNT_INSERT", "COUNT_UPDATE", "COUNT_DELETE", "SUM_TIMER_FETCH", "SUM_TIMER_INSERT", "SUM_TIMER_UPDATE", "SUM_TIMER_DELETE"} rows := sqlmock.NewRows(columns). // Note, timers are in picoseconds. AddRow("database", "table", "index", "10", "11", "12", "13", "14000000000000", "15000000000000", "16000000000000", "17000000000000"). 
AddRow("database", "table", "NONE", "20", "21", "22", "23", "24000000000000", "25000000000000", "26000000000000", "27000000000000") mock.ExpectQuery(sanitizeQuery(perfIndexIOWaitsQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapePerfIndexIOWaits{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() metricExpected := []MetricResult{ {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "fetch"}, value: 10, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "update"}, value: 12, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "delete"}, value: 13, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "fetch"}, value: 14, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "update"}, value: 16, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "index", "operation": "delete"}, value: 17, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "fetch"}, value: 20, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "insert"}, value: 21, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "update"}, value: 22, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "delete"}, value: 23, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "fetch"}, value: 24, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "insert"}, value: 25, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "update"}, value: 26, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"schema": "database", "name": "table", "index": "NONE", "operation": "delete"}, value: 27, metricType: dto.MetricType_COUNTER}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range metricExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } mysqld_exporter-0.15.0/collector/perf_schema_memory_events.go000066400000000000000000000074321444546573200246270ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and
// limitations under the License.

// Scrape `performance_schema.memory_summary_global_by_event_name`.

package collector

import (
	"context"
	"database/sql"
	"strings"

	"github.com/alecthomas/kingpin/v2"
	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
)

const perfMemoryEventsQuery = `
	SELECT EVENT_NAME, SUM_NUMBER_OF_BYTES_ALLOC, SUM_NUMBER_OF_BYTES_FREE, CURRENT_NUMBER_OF_BYTES_USED
	  FROM performance_schema.memory_summary_global_by_event_name
	  WHERE COUNT_ALLOC > 0;
	`

// Tunable flags.
var (
	performanceSchemaMemoryEventsRemovePrefix = kingpin.Flag(
		"collect.perf_schema.memory_events.remove_prefix",
		"Remove instrument prefix in performance_schema.memory_summary_global_by_event_name",
	).Default("memory/").String()
)

// Metric descriptors.
var (
	performanceSchemaMemoryBytesAllocDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, performanceSchema, "memory_events_alloc_bytes_total"),
		"The total number of bytes allocated by events.",
		[]string{"event_name"}, nil,
	)
	performanceSchemaMemoryBytesFreeDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, performanceSchema, "memory_events_free_bytes_total"),
		"The total number of bytes freed by events.",
		[]string{"event_name"}, nil,
	)
	performanceSchemaMemoryUsedBytesDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, performanceSchema, "memory_events_used_bytes"),
		"The number of bytes currently allocated by events.",
		[]string{"event_name"}, nil,
	)
)

// ScrapePerfMemoryEvents collects from `performance_schema.memory_summary_global_by_event_name`.
type ScrapePerfMemoryEvents struct{}

// Name of the Scraper. Should be unique.
func (ScrapePerfMemoryEvents) Name() string {
	return "perf_schema.memory_events"
}

// Help describes the role of the Scraper.
func (ScrapePerfMemoryEvents) Help() string {
	return "Collect metrics from performance_schema.memory_summary_global_by_event_name"
}

// Version of MySQL from which scraper is available.
func (ScrapePerfMemoryEvents) Version() float64 {
	return 5.7
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
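// Rows with COUNT_ALLOC = 0 are filtered out by the query itself, and the
// configured instrument prefix (default "memory/") is trimmed from each event
// name before it is exported as the event_name label.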
func (ScrapePerfMemoryEvents) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
	perfSchemaMemoryEventsRows, err := db.QueryContext(ctx, perfMemoryEventsQuery)
	if err != nil {
		return err
	}
	defer perfSchemaMemoryEventsRows.Close()

	var (
		eventName    string
		bytesAlloc   uint64
		bytesFree    uint64
		currentBytes int64
	)

	for perfSchemaMemoryEventsRows.Next() {
		if err := perfSchemaMemoryEventsRows.Scan(
			&eventName, &bytesAlloc, &bytesFree, &currentBytes,
		); err != nil {
			return err
		}
		eventName := strings.TrimPrefix(eventName, *performanceSchemaMemoryEventsRemovePrefix)
		ch <- prometheus.MustNewConstMetric(
			performanceSchemaMemoryBytesAllocDesc, prometheus.CounterValue, float64(bytesAlloc), eventName,
		)
		ch <- prometheus.MustNewConstMetric(
			performanceSchemaMemoryBytesFreeDesc, prometheus.CounterValue, float64(bytesFree), eventName,
		)
		ch <- prometheus.MustNewConstMetric(
			performanceSchemaMemoryUsedBytesDesc, prometheus.GaugeValue, float64(currentBytes), eventName,
		)
	}
	return nil
}

// check interface
var _ Scraper = ScrapePerfMemoryEvents{}
mysqld_exporter-0.15.0/collector/perf_schema_memory_events_test.go000066400000000000000000000066331444546573200256680ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package collector

import (
	"context"
	"fmt"
	"testing"

	"github.com/DATA-DOG/go-sqlmock"
	"github.com/alecthomas/kingpin/v2"
	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"github.com/smartystreets/goconvey/convey"
)

func TestScrapePerfMemoryEvents(t *testing.T) {
	_, err := kingpin.CommandLine.Parse([]string{})
	if err != nil {
		t.Fatal(err)
	}

	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("error opening a stub database connection: %s", err)
	}
	defer db.Close()

	columns := []string{
		"EVENT_NAME",
		"SUM_NUMBER_OF_BYTES_ALLOC",
		"SUM_NUMBER_OF_BYTES_FREE",
		"CURRENT_NUMBER_OF_BYTES_USED",
	}

	rows := sqlmock.NewRows(columns).
		AddRow("memory/innodb/event1", "1001", "500", "501").
		AddRow("memory/performance_schema/event1", "6000", "7", "-83904").
		AddRow("memory/innodb/event2", "2002", "1000", "1002").
AddRow("memory/sql/event1", "30", "4", "26") mock.ExpectQuery(sanitizeQuery(perfMemoryEventsQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapePerfMemoryEvents{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { panic(fmt.Sprintf("error calling function on test: %s", err)) } close(ch) }() metricExpected := []MetricResult{ {labels: labelMap{"event_name": "innodb/event1"}, value: 1001, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"event_name": "innodb/event1"}, value: 500, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"event_name": "innodb/event1"}, value: 501, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"event_name": "performance_schema/event1"}, value: 6000, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"event_name": "performance_schema/event1"}, value: 7, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"event_name": "performance_schema/event1"}, value: -83904, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"event_name": "innodb/event2"}, value: 2002, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"event_name": "innodb/event2"}, value: 1000, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"event_name": "innodb/event2"}, value: 1002, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"event_name": "sql/event1"}, value: 30, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"event_name": "sql/event1"}, value: 4, metricType: dto.MetricType_COUNTER}, {labels: labelMap{"event_name": "sql/event1"}, value: 26, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range metricExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } mysqld_exporter-0.15.0/collector/perf_schema_replication_applier_status_by_worker.go000066400000000000000000000256131444546573200314470ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "database/sql" "time" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const perfReplicationApplierStatsByWorkerQuery = ` SELECT CHANNEL_NAME, WORKER_ID, LAST_APPLIED_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP, LAST_APPLIED_TRANSACTION_IMMEDIATE_COMMIT_TIMESTAMP, LAST_APPLIED_TRANSACTION_START_APPLY_TIMESTAMP, LAST_APPLIED_TRANSACTION_END_APPLY_TIMESTAMP, APPLYING_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP, APPLYING_TRANSACTION_IMMEDIATE_COMMIT_TIMESTAMP, APPLYING_TRANSACTION_START_APPLY_TIMESTAMP FROM performance_schema.replication_applier_status_by_worker ` const timeLayout = "2006-01-02 15:04:05.000000" // Metric descriptors. 
var (
	performanceSchemaReplicationApplierStatsByWorkerLastAppliedTransactionOriginalCommitSecondDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, performanceSchema, "last_applied_transaction_original_commit_timestamp_seconds"),
		"A timestamp that shows when the last transaction applied by this worker was committed on the original master.",
		[]string{"channel_name", "member_id"}, nil,
	)
	performanceSchemaReplicationApplierStatsByWorkerLastAppliedTransactionImmediateCommitSecondDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, performanceSchema, "last_applied_transaction_immediate_commit_timestamp_seconds"),
		"A timestamp that shows when the last transaction applied by this worker was committed on the immediate master.",
		[]string{"channel_name", "member_id"}, nil,
	)
	performanceSchemaReplicationApplierStatsByWorkerLastAppliedTransactionStartApplySecondDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, performanceSchema, "last_applied_transaction_start_apply_timestamp_seconds"),
		"A timestamp that shows when this worker started applying the last applied transaction.",
		[]string{"channel_name", "member_id"}, nil,
	)
	performanceSchemaReplicationApplierStatsByWorkerLastAppliedTransactionEndApplySecondDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, performanceSchema, "last_applied_transaction_end_apply_timestamp_seconds"),
		"A timestamp that shows when this worker finished applying the last applied transaction.",
		[]string{"channel_name", "member_id"}, nil,
	)
	performanceSchemaReplicationApplierStatsByWorkerApplyingTransactionOriginalCommitSecondDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, performanceSchema, "applying_transaction_original_commit_timestamp_seconds"),
		"A timestamp that shows when the transaction this worker is currently applying was committed on the original master.",
		[]string{"channel_name", "member_id"}, nil,
	)
	performanceSchemaReplicationApplierStatsByWorkerApplyingTransactionImmediateCommitSecondDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, performanceSchema, "applying_transaction_immediate_commit_timestamp_seconds"),
		"A timestamp that shows when the transaction this worker is currently applying was committed on the immediate master.",
		[]string{"channel_name", "member_id"}, nil,
	)
	performanceSchemaReplicationApplierStatsByWorkerApplyingTransactionStartApplySecondDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, performanceSchema, "applying_transaction_start_apply_timestamp_seconds"),
		"A timestamp that shows when this worker started its first attempt to apply the transaction that is currently being applied.",
		[]string{"channel_name", "member_id"}, nil,
	)
)

// ScrapePerfReplicationApplierStatsByWorker collects from `performance_schema.replication_applier_status_by_worker`.
type ScrapePerfReplicationApplierStatsByWorker struct{}

// Name of the Scraper. Should be unique.
func (ScrapePerfReplicationApplierStatsByWorker) Name() string {
	return performanceSchema + ".replication_applier_status_by_worker"
}

// Help describes the role of the Scraper.
func (ScrapePerfReplicationApplierStatsByWorker) Help() string {
	return "Collect metrics from performance_schema.replication_applier_status_by_worker"
}

// Version of MySQL from which scraper is available.
func (ScrapePerfReplicationApplierStatsByWorker) Version() float64 {
	return 5.7
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
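// Each timestamp column is parsed using timeLayout; values that fail to parse
// (including MySQL's zero timestamp "0000-00-00 00:00:00.000000") are exported as 0.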
func (ScrapePerfReplicationApplierStatsByWorker) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
	perfReplicationApplierStatsByWorkerRows, err := db.QueryContext(ctx, perfReplicationApplierStatsByWorkerQuery)
	if err != nil {
		return err
	}
	defer perfReplicationApplierStatsByWorkerRows.Close()

	var (
		channelName, workerId                                                                     string
		lastAppliedTransactionOriginalCommit, lastAppliedTransactionImmediateCommit               string
		lastAppliedTransactionStartApply, lastAppliedTransactionEndApply                          string
		applyingTransactionOriginalCommit, applyingTransactionImmediateCommit                     string
		applyingTransactionStartApply                                                             string
		lastAppliedTransactionOriginalCommitSeconds, lastAppliedTransactionImmediateCommitSeconds float64
		lastAppliedTransactionStartApplySeconds, lastAppliedTransactionEndApplySeconds            float64
		applyingTransactionOriginalCommitSeconds, applyingTransactionImmediateCommitSeconds       float64
		applyingTransactionStartApplySeconds                                                      float64
	)
	for perfReplicationApplierStatsByWorkerRows.Next() {
		if err := perfReplicationApplierStatsByWorkerRows.Scan(
			&channelName, &workerId,
			&lastAppliedTransactionOriginalCommit, &lastAppliedTransactionImmediateCommit,
			&lastAppliedTransactionStartApply, &lastAppliedTransactionEndApply,
			&applyingTransactionOriginalCommit, &applyingTransactionImmediateCommit,
			&applyingTransactionStartApply,
		); err != nil {
			return err
		}

		lastAppliedTransactionOriginalCommitTime, err := time.Parse(timeLayout, lastAppliedTransactionOriginalCommit)
		if err != nil {
			lastAppliedTransactionOriginalCommitTime = time.Time{}
		}
		// MySQL's zero timestamp fails to parse, leaving the zero time value;
		// export such values as a plain 0 instead of a bogus Unix timestamp.
		if !lastAppliedTransactionOriginalCommitTime.IsZero() {
			lastAppliedTransactionOriginalCommitSeconds = float64(lastAppliedTransactionOriginalCommitTime.UnixNano()) / 1e9
		} else {
			lastAppliedTransactionOriginalCommitSeconds = 0
		}
		ch <- prometheus.MustNewConstMetric(
			performanceSchemaReplicationApplierStatsByWorkerLastAppliedTransactionOriginalCommitSecondDesc,
			prometheus.GaugeValue, lastAppliedTransactionOriginalCommitSeconds, channelName, workerId,
		)

		lastAppliedTransactionImmediateCommitTime, err := time.Parse(timeLayout, lastAppliedTransactionImmediateCommit)
		if err != nil {
			lastAppliedTransactionImmediateCommitTime = time.Time{}
		}
		if !lastAppliedTransactionImmediateCommitTime.IsZero() {
			lastAppliedTransactionImmediateCommitSeconds = float64(lastAppliedTransactionImmediateCommitTime.UnixNano()) / 1e9
		} else {
			lastAppliedTransactionImmediateCommitSeconds = 0
		}
		ch <- prometheus.MustNewConstMetric(
			performanceSchemaReplicationApplierStatsByWorkerLastAppliedTransactionImmediateCommitSecondDesc,
			prometheus.GaugeValue, lastAppliedTransactionImmediateCommitSeconds, channelName, workerId,
		)

		lastAppliedTransactionStartApplyTime, err := time.Parse(timeLayout, lastAppliedTransactionStartApply)
		if err != nil {
			lastAppliedTransactionStartApplyTime = time.Time{}
		}
		if !lastAppliedTransactionStartApplyTime.IsZero() {
			lastAppliedTransactionStartApplySeconds = float64(lastAppliedTransactionStartApplyTime.UnixNano()) / 1e9
		} else {
			lastAppliedTransactionStartApplySeconds = 0
		}
		ch <- prometheus.MustNewConstMetric(
			performanceSchemaReplicationApplierStatsByWorkerLastAppliedTransactionStartApplySecondDesc,
			prometheus.GaugeValue, lastAppliedTransactionStartApplySeconds, channelName, workerId,
		)

		lastAppliedTransactionEndApplyTime, err := time.Parse(timeLayout, lastAppliedTransactionEndApply)
		if err != nil {
			lastAppliedTransactionEndApplyTime = time.Time{}
		}
		if !lastAppliedTransactionEndApplyTime.IsZero() {
lastAppliedTransactionEndApplySeconds = float64(lastAppliedTransactionEndApplyTime.UnixNano()) / 1e9 } else { lastAppliedTransactionEndApplySeconds = 0 } ch <- prometheus.MustNewConstMetric( performanceSchemaReplicationApplierStatsByWorkerLastAppliedTransactionEndApplySecondDesc, prometheus.GaugeValue, lastAppliedTransactionEndApplySeconds, channelName, workerId, ) applyingTransactionOriginalCommitTime, err := time.Parse(timeLayout, applyingTransactionOriginalCommit) if err != nil { applyingTransactionOriginalCommitTime = time.Time{} } if !applyingTransactionOriginalCommitTime.IsZero() { applyingTransactionOriginalCommitSeconds = float64(applyingTransactionOriginalCommitTime.UnixNano()) / 1e9 } else { applyingTransactionOriginalCommitSeconds = 0 } ch <- prometheus.MustNewConstMetric( performanceSchemaReplicationApplierStatsByWorkerApplyingTransactionOriginalCommitSecondDesc, prometheus.GaugeValue, applyingTransactionOriginalCommitSeconds, channelName, workerId, ) applyingTransactionImmediateCommitTime, err := time.Parse(timeLayout, applyingTransactionImmediateCommit) if err != nil { applyingTransactionImmediateCommitTime = time.Time{} } if !applyingTransactionImmediateCommitTime.IsZero() { applyingTransactionImmediateCommitSeconds = float64(applyingTransactionImmediateCommitTime.UnixNano()) / 1e9 } else { applyingTransactionImmediateCommitSeconds = 0 } ch <- prometheus.MustNewConstMetric( performanceSchemaReplicationApplierStatsByWorkerApplyingTransactionImmediateCommitSecondDesc, prometheus.GaugeValue, applyingTransactionImmediateCommitSeconds, channelName, workerId, ) applyingTransactionStartApplyTime, err := time.Parse(timeLayout, applyingTransactionStartApply) if err != nil { applyingTransactionStartApplyTime = time.Time{} } if !applyingTransactionStartApplyTime.IsZero() { applyingTransactionStartApplySeconds = float64(applyingTransactionStartApplyTime.UnixNano()) / 1e9 } else { applyingTransactionStartApplySeconds = 0 } ch <- prometheus.MustNewConstMetric( performanceSchemaReplicationApplierStatsByWorkerApplyingTransactionStartApplySecondDesc, prometheus.GaugeValue, applyingTransactionStartApplySeconds, channelName, workerId, ) } return nil } // check interface var _ Scraper = ScrapePerfReplicationApplierStatsByWorker{} mysqld_exporter-0.15.0/collector/perf_schema_replication_applier_status_by_worker_test.go000066400000000000000000000105561444546573200325060ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package collector import ( "context" "testing" "time" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapePerfReplicationApplierStatsByWorker(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{ "CHANNEL_NAME", "WORKER_ID", "LAST_APPLIED_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP", "LAST_APPLIED_TRANSACTION_IMMEDIATE_COMMIT_TIMESTAMP", "LAST_APPLIED_TRANSACTION_START_APPLY_TIMESTAMP", "LAST_APPLIED_TRANSACTION_END_APPLY_TIMESTAMP", "APPLYING_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP", "APPLYING_TRANSACTION_IMMEDIATE_COMMIT_TIMESTAMP", "APPLYING_TRANSACTION_START_APPLY_TIMESTAMP", } timeZero := "0000-00-00 00:00:00.000000" stubTime := time.Date(2019, 3, 14, 0, 0, 0, int(time.Millisecond), time.UTC) rows := sqlmock.NewRows(columns). AddRow("dummy_0", "0", timeZero, timeZero, timeZero, timeZero, timeZero, timeZero, timeZero). AddRow("dummy_1", "1", stubTime.Format(timeLayout), stubTime.Add(1*time.Minute).Format(timeLayout), stubTime.Add(2*time.Minute).Format(timeLayout), stubTime.Add(3*time.Minute).Format(timeLayout), stubTime.Add(4*time.Minute).Format(timeLayout), stubTime.Add(5*time.Minute).Format(timeLayout), stubTime.Add(6*time.Minute).Format(timeLayout)) mock.ExpectQuery(sanitizeQuery(perfReplicationApplierStatsByWorkerQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapePerfReplicationApplierStatsByWorker{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() metricExpected := []MetricResult{ {labels: labelMap{"channel_name": "dummy_0", "member_id": "0"}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"channel_name": "dummy_0", "member_id": "0"}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"channel_name": "dummy_0", "member_id": "0"}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"channel_name": "dummy_0", "member_id": "0"}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"channel_name": "dummy_0", "member_id": "0"}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"channel_name": "dummy_0", "member_id": "0"}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"channel_name": "dummy_0", "member_id": "0"}, value: 0, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"channel_name": "dummy_1", "member_id": "1"}, value: 1.552521600001e+9, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"channel_name": "dummy_1", "member_id": "1"}, value: 1.552521660001e+9, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"channel_name": "dummy_1", "member_id": "1"}, value: 1.552521720001e+9, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"channel_name": "dummy_1", "member_id": "1"}, value: 1.552521780001e+9, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"channel_name": "dummy_1", "member_id": "1"}, value: 1.552521840001e+9, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"channel_name": "dummy_1", "member_id": "1"}, value: 1.552521900001e+9, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"channel_name": "dummy_1", "member_id": "1"}, value: 1.552521960001e+9, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range metricExpected { got := 
readMetric(<-ch)
			convey.So(got, convey.ShouldResemble, expect)
		}
	})

	// Ensure all SQL queries were executed
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
mysqld_exporter-0.15.0/collector/perf_schema_replication_group_member_stats.go000066400000000000000000000117261444546573200302260ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package collector

import (
	"context"
	"database/sql"
	"strconv"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
)

const perfReplicationGroupMemberStatsQuery = `
	SELECT * FROM performance_schema.replication_group_member_stats WHERE MEMBER_ID=@@server_uuid
	`

var (
	// The list of columns we are interested in.
	// In MySQL 5.7 these are the first 4 columns available. In MySQL 8.x all 8.
	perfReplicationGroupMemberStats = map[string]struct {
		vtype prometheus.ValueType
		desc  *prometheus.Desc
	}{
		"COUNT_TRANSACTIONS_IN_QUEUE":                {prometheus.GaugeValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, performanceSchema, "transactions_in_queue"), "The number of transactions in the queue pending conflict detection checks.", nil, nil)},
		"COUNT_TRANSACTIONS_CHECKED":                 {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, performanceSchema, "transactions_checked_total"), "The number of transactions that have been checked for conflicts.", nil, nil)},
		"COUNT_CONFLICTS_DETECTED":                   {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, performanceSchema, "conflicts_detected_total"), "The number of transactions that have not passed the conflict detection check.", nil, nil)},
		"COUNT_TRANSACTIONS_ROWS_VALIDATING":         {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, performanceSchema, "transactions_rows_validating_total"), "Number of transaction rows which can be used for certification, but have not been garbage collected.", nil, nil)},
		"COUNT_TRANSACTIONS_REMOTE_IN_APPLIER_QUEUE": {prometheus.GaugeValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, performanceSchema, "transactions_remote_in_applier_queue"), "The number of transactions that this member has received from the replication group which are waiting to be applied.", nil, nil)},
		"COUNT_TRANSACTIONS_REMOTE_APPLIED":          {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, performanceSchema, "transactions_remote_applied_total"), "Number of transactions this member has received from the group and applied.", nil, nil)},
		"COUNT_TRANSACTIONS_LOCAL_PROPOSED":          {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, performanceSchema, "transactions_local_proposed_total"), "Number of transactions which originated on this member and were sent to the group.", nil, nil)},
		"COUNT_TRANSACTIONS_LOCAL_ROLLBACK":          {prometheus.CounterValue, prometheus.NewDesc(prometheus.BuildFQName(namespace, performanceSchema, "transactions_local_rollback_total"), "Number of transactions
which originated on this member and were rolled back by the group.", nil, nil)}, } ) // ScrapePerfReplicationGroupMemberStats collects from `performance_schema.replication_group_member_stats`. type ScrapePerfReplicationGroupMemberStats struct{} // Name of the Scraper. Should be unique. func (ScrapePerfReplicationGroupMemberStats) Name() string { return performanceSchema + ".replication_group_member_stats" } // Help describes the role of the Scraper. func (ScrapePerfReplicationGroupMemberStats) Help() string { return "Collect metrics from performance_schema.replication_group_member_stats" } // Version of MySQL from which scraper is available. func (ScrapePerfReplicationGroupMemberStats) Version() float64 { return 5.7 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapePerfReplicationGroupMemberStats) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { rows, err := db.QueryContext(ctx, perfReplicationGroupMemberStatsQuery) if err != nil { return err } defer rows.Close() var columnNames []string if columnNames, err = rows.Columns(); err != nil { return err } var scanArgs = make([]interface{}, len(columnNames)) for i := range scanArgs { scanArgs[i] = &sql.RawBytes{} } for rows.Next() { if err := rows.Scan(scanArgs...); err != nil { return err } for i, columnName := range columnNames { if metric, ok := perfReplicationGroupMemberStats[columnName]; ok { value, err := strconv.ParseFloat(string(*scanArgs[i].(*sql.RawBytes)), 64) if err != nil { return err } ch <- prometheus.MustNewConstMetric(metric.desc, metric.vtype, value) } } } return nil } // check interface var _ Scraper = ScrapePerfReplicationGroupMemberStats{} mysqld_exporter-0.15.0/collector/perf_schema_replication_group_member_stats_test.go000066400000000000000000000062751444546573200312700ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapePerfReplicationGroupMemberStats(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{ "CHANNEL_NAME", "VIEW_ID", "MEMBER_ID", "COUNT_TRANSACTIONS_IN_QUEUE", "COUNT_TRANSACTIONS_CHECKED", "COUNT_CONFLICTS_DETECTED", "COUNT_TRANSACTIONS_ROWS_VALIDATING", "TRANSACTIONS_COMMITTED_ALL_MEMBERS", "LAST_CONFLICT_FREE_TRANSACTION", "COUNT_TRANSACTIONS_REMOTE_IN_APPLIER_QUEUE", "COUNT_TRANSACTIONS_REMOTE_APPLIED", "COUNT_TRANSACTIONS_LOCAL_PROPOSED", "COUNT_TRANSACTIONS_LOCAL_ROLLBACK", } rows := sqlmock.NewRows(columns). 
		AddRow(
			"group_replication_applier",
			"15813535259046852:43",
			"e14c4f71-025f-11ea-b800-0620049edbec",
			float64(0),
			float64(7389775),
			float64(1),
			float64(48),
			"0515b3c2-f59f-11e9-881b-0620049edbec:1-15270987,\n8f782839-34f7-11e7-a774-060ac4f023ae:4-39:2387-161606",
			"0515b3c2-f59f-11e9-881b-0620049edbec:15271011",
			float64(2),
			float64(22),
			float64(7389759),
			float64(7),
		)
	mock.ExpectQuery(sanitizeQuery(perfReplicationGroupMemberStatsQuery)).WillReturnRows(rows)

	ch := make(chan prometheus.Metric)
	go func() {
		if err = (ScrapePerfReplicationGroupMemberStats{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil {
			t.Errorf("error calling function on test: %s", err)
		}
		close(ch)
	}()

	expected := []MetricResult{
		{labels: labelMap{}, value: 0, metricType: dto.MetricType_GAUGE},
		{labels: labelMap{}, value: float64(7389775), metricType: dto.MetricType_COUNTER},
		{labels: labelMap{}, value: float64(1), metricType: dto.MetricType_COUNTER},
		{labels: labelMap{}, value: float64(48), metricType: dto.MetricType_COUNTER},
		{labels: labelMap{}, value: 2, metricType: dto.MetricType_GAUGE},
		{labels: labelMap{}, value: float64(22), metricType: dto.MetricType_COUNTER},
		{labels: labelMap{}, value: float64(7389759), metricType: dto.MetricType_COUNTER},
		{labels: labelMap{}, value: float64(7), metricType: dto.MetricType_COUNTER},
	}
	convey.Convey("Metrics comparison", t, func() {
		for _, expect := range expected {
			got := readMetric(<-ch)
			convey.So(got, convey.ShouldResemble, expect)
		}
	})

	// Ensure all SQL queries were executed
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
mysqld_exporter-0.15.0/collector/perf_schema_replication_group_members.go000066400000000000000000000060241444546573200271660ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package collector

import (
	"context"
	"database/sql"
	"strings"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
)

const perfReplicationGroupMembersQuery = `
	SELECT * FROM performance_schema.replication_group_members
	`

// ScrapePerfReplicationGroupMembers collects from `performance_schema.replication_group_members`.
type ScrapePerfReplicationGroupMembers struct{}

// Name of the Scraper. Should be unique.
func (ScrapePerfReplicationGroupMembers) Name() string {
	return performanceSchema + ".replication_group_members"
}

// Help describes the role of the Scraper.
func (ScrapePerfReplicationGroupMembers) Help() string {
	return "Collect metrics from performance_schema.replication_group_members"
}

// Version of MySQL from which scraper is available.
func (ScrapePerfReplicationGroupMembers) Version() float64 {
	return 5.7
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
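// One info-style gauge with value 1 is emitted per group member; every column
// of the result set (lower-cased) becomes a label, so the label set adapts to
// the columns available in the server version being scraped.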
func (ScrapePerfReplicationGroupMembers) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { perfReplicationGroupMembersRows, err := db.QueryContext(ctx, perfReplicationGroupMembersQuery) if err != nil { return err } defer perfReplicationGroupMembersRows.Close() var columnNames []string if columnNames, err = perfReplicationGroupMembersRows.Columns(); err != nil { return err } var scanArgs = make([]interface{}, len(columnNames)) for i := range scanArgs { scanArgs[i] = &sql.RawBytes{} } for perfReplicationGroupMembersRows.Next() { if err := perfReplicationGroupMembersRows.Scan(scanArgs...); err != nil { return err } var labelNames = make([]string, len(columnNames)) var values = make([]string, len(columnNames)) for i, columnName := range columnNames { labelNames[i] = strings.ToLower(columnName) values[i] = string(*scanArgs[i].(*sql.RawBytes)) } var performanceSchemaReplicationGroupMembersMemberDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "replication_group_member_info"), "Information about the replication group member: "+ "channel_name, member_id, member_host, member_port, member_state. "+ "(member_role and member_version where available)", labelNames, nil, ) ch <- prometheus.MustNewConstMetric(performanceSchemaReplicationGroupMembersMemberDesc, prometheus.GaugeValue, 1, values...) } return nil } // check interface var _ Scraper = ScrapePerfReplicationGroupMembers{} mysqld_exporter-0.15.0/collector/perf_schema_replication_group_members_test.go000066400000000000000000000115441444546573200302300ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" "testing" ) func TestScrapePerfReplicationGroupMembers(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{ "CHANNEL_NAME", "MEMBER_ID", "MEMBER_HOST", "MEMBER_PORT", "MEMBER_STATE", "MEMBER_ROLE", "MEMBER_VERSION", } rows := sqlmock.NewRows(columns). AddRow("group_replication_applier", "uuid1", "hostname1", "3306", "ONLINE", "PRIMARY", "8.0.19"). AddRow("group_replication_applier", "uuid2", "hostname2", "3306", "ONLINE", "SECONDARY", "8.0.19"). 
AddRow("group_replication_applier", "uuid3", "hostname3", "3306", "ONLINE", "SECONDARY", "8.0.19") mock.ExpectQuery(sanitizeQuery(perfReplicationGroupMembersQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapePerfReplicationGroupMembers{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() metricExpected := []MetricResult{ {labels: labelMap{"channel_name": "group_replication_applier", "member_id": "uuid1", "member_host": "hostname1", "member_port": "3306", "member_state": "ONLINE", "member_role": "PRIMARY", "member_version": "8.0.19"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"channel_name": "group_replication_applier", "member_id": "uuid2", "member_host": "hostname2", "member_port": "3306", "member_state": "ONLINE", "member_role": "SECONDARY", "member_version": "8.0.19"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"channel_name": "group_replication_applier", "member_id": "uuid3", "member_host": "hostname3", "member_port": "3306", "member_state": "ONLINE", "member_role": "SECONDARY", "member_version": "8.0.19"}, value: 1, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range metricExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed. if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } func TestScrapePerfReplicationGroupMembersMySQL57(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{ "CHANNEL_NAME", "MEMBER_ID", "MEMBER_HOST", "MEMBER_PORT", "MEMBER_STATE", } rows := sqlmock.NewRows(columns). AddRow("group_replication_applier", "uuid1", "hostname1", "3306", "ONLINE"). AddRow("group_replication_applier", "uuid2", "hostname2", "3306", "ONLINE"). AddRow("group_replication_applier", "uuid3", "hostname3", "3306", "ONLINE") mock.ExpectQuery(sanitizeQuery(perfReplicationGroupMembersQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapePerfReplicationGroupMembers{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() metricExpected := []MetricResult{ {labels: labelMap{"channel_name": "group_replication_applier", "member_id": "uuid1", "member_host": "hostname1", "member_port": "3306", "member_state": "ONLINE"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"channel_name": "group_replication_applier", "member_id": "uuid2", "member_host": "hostname2", "member_port": "3306", "member_state": "ONLINE"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"channel_name": "group_replication_applier", "member_id": "uuid3", "member_host": "hostname3", "member_port": "3306", "member_state": "ONLINE"}, value: 1, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range metricExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed. 
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
mysqld_exporter-0.15.0/collector/perf_schema_table_io_waits.go000066400000000000000000000106461444546573200247210ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Scrape `performance_schema.table_io_waits_summary_by_table`.

package collector

import (
	"context"
	"database/sql"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
)

const perfTableIOWaitsQuery = `
	SELECT
	    OBJECT_SCHEMA, OBJECT_NAME,
	    COUNT_FETCH, COUNT_INSERT, COUNT_UPDATE, COUNT_DELETE,
	    SUM_TIMER_FETCH, SUM_TIMER_INSERT, SUM_TIMER_UPDATE, SUM_TIMER_DELETE
	  FROM performance_schema.table_io_waits_summary_by_table
	  WHERE OBJECT_SCHEMA NOT IN ('mysql', 'performance_schema')
	`

// Metric descriptors.
var (
	performanceSchemaTableWaitsDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, performanceSchema, "table_io_waits_total"),
		"The total number of table I/O wait events for each table and operation.",
		[]string{"schema", "name", "operation"}, nil,
	)
	performanceSchemaTableWaitsTimeDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, performanceSchema, "table_io_waits_seconds_total"),
		"The total time of table I/O wait events for each table and operation.",
		[]string{"schema", "name", "operation"}, nil,
	)
)

// ScrapePerfTableIOWaits collects from `performance_schema.table_io_waits_summary_by_table`.
type ScrapePerfTableIOWaits struct{}

// Name of the Scraper. Should be unique.
func (ScrapePerfTableIOWaits) Name() string {
	return "perf_schema.tableiowaits"
}

// Help describes the role of the Scraper.
func (ScrapePerfTableIOWaits) Help() string {
	return "Collect metrics from performance_schema.table_io_waits_summary_by_table"
}

// Version of MySQL from which scraper is available.
func (ScrapePerfTableIOWaits) Version() float64 {
	return 5.6
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
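// Wait counts are exported as-is; the SUM_TIMER_* columns are measured in
// picoseconds and are converted to seconds before being exported.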
func (ScrapePerfTableIOWaits) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { perfSchemaTableWaitsRows, err := db.QueryContext(ctx, perfTableIOWaitsQuery) if err != nil { return err } defer perfSchemaTableWaitsRows.Close() var ( objectSchema, objectName string countFetch, countInsert, countUpdate, countDelete uint64 timeFetch, timeInsert, timeUpdate, timeDelete uint64 ) for perfSchemaTableWaitsRows.Next() { if err := perfSchemaTableWaitsRows.Scan( &objectSchema, &objectName, &countFetch, &countInsert, &countUpdate, &countDelete, &timeFetch, &timeInsert, &timeUpdate, &timeDelete, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsDesc, prometheus.CounterValue, float64(countFetch), objectSchema, objectName, "fetch", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsDesc, prometheus.CounterValue, float64(countInsert), objectSchema, objectName, "insert", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsDesc, prometheus.CounterValue, float64(countUpdate), objectSchema, objectName, "update", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsDesc, prometheus.CounterValue, float64(countDelete), objectSchema, objectName, "delete", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsTimeDesc, prometheus.CounterValue, float64(timeFetch)/picoSeconds, objectSchema, objectName, "fetch", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsTimeDesc, prometheus.CounterValue, float64(timeInsert)/picoSeconds, objectSchema, objectName, "insert", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsTimeDesc, prometheus.CounterValue, float64(timeUpdate)/picoSeconds, objectSchema, objectName, "update", ) ch <- prometheus.MustNewConstMetric( performanceSchemaTableWaitsTimeDesc, prometheus.CounterValue, float64(timeDelete)/picoSeconds, objectSchema, objectName, "delete", ) } return nil } // check interface var _ Scraper = ScrapePerfTableIOWaits{} mysqld_exporter-0.15.0/collector/perf_schema_table_lock_waits.go000066400000000000000000000220341444546573200252340ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `performance_schema.table_lock_waits_summary_by_table`. 
package collector import ( "context" "database/sql" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const perfTableLockWaitsQuery = ` SELECT OBJECT_SCHEMA, OBJECT_NAME, COUNT_READ_NORMAL, COUNT_READ_WITH_SHARED_LOCKS, COUNT_READ_HIGH_PRIORITY, COUNT_READ_NO_INSERT, COUNT_READ_EXTERNAL, COUNT_WRITE_ALLOW_WRITE, COUNT_WRITE_CONCURRENT_INSERT, COUNT_WRITE_LOW_PRIORITY, COUNT_WRITE_NORMAL, COUNT_WRITE_EXTERNAL, SUM_TIMER_READ_NORMAL, SUM_TIMER_READ_WITH_SHARED_LOCKS, SUM_TIMER_READ_HIGH_PRIORITY, SUM_TIMER_READ_NO_INSERT, SUM_TIMER_READ_EXTERNAL, SUM_TIMER_WRITE_ALLOW_WRITE, SUM_TIMER_WRITE_CONCURRENT_INSERT, SUM_TIMER_WRITE_LOW_PRIORITY, SUM_TIMER_WRITE_NORMAL, SUM_TIMER_WRITE_EXTERNAL FROM performance_schema.table_lock_waits_summary_by_table WHERE OBJECT_SCHEMA NOT IN ('mysql', 'performance_schema', 'information_schema') ` // Metric descriptors. var ( performanceSchemaSQLTableLockWaitsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "sql_lock_waits_total"), "The total number of SQL lock wait events for each table and operation.", []string{"schema", "name", "operation"}, nil, ) performanceSchemaExternalTableLockWaitsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "external_lock_waits_total"), "The total number of external lock wait events for each table and operation.", []string{"schema", "name", "operation"}, nil, ) performanceSchemaSQLTableLockWaitsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "sql_lock_waits_seconds_total"), "The total time of SQL lock wait events for each table and operation.", []string{"schema", "name", "operation"}, nil, ) performanceSchemaExternalTableLockWaitsTimeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, performanceSchema, "external_lock_waits_seconds_total"), "The total time of external lock wait events for each table and operation.", []string{"schema", "name", "operation"}, nil, ) ) // ScrapePerfTableLockWaits collects from `performance_schema.table_lock_waits_summary_by_table`. type ScrapePerfTableLockWaits struct{} // Name of the Scraper. Should be unique. func (ScrapePerfTableLockWaits) Name() string { return "perf_schema.tablelocks" } // Help describes the role of the Scraper. func (ScrapePerfTableLockWaits) Help() string { return "Collect metrics from performance_schema.table_lock_waits_summary_by_table" } // Version of MySQL from which scraper is available. func (ScrapePerfTableLockWaits) Version() float64 { return 5.6 } // Scrape collects data from database connection and sends it over channel as prometheus metric. 
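// Lock modes taken by the SQL layer (read_normal, read_with_shared_locks,
// write_allow_write, ...) are exported under the sql_lock_waits metrics, while
// the *_EXTERNAL columns map to the external_lock_waits metrics with operation
// "read" or "write"; timers are converted from picoseconds to seconds.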
func (ScrapePerfTableLockWaits) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { perfSchemaTableLockWaitsRows, err := db.QueryContext(ctx, perfTableLockWaitsQuery) if err != nil { return err } defer perfSchemaTableLockWaitsRows.Close() var ( objectSchema string objectName string countReadNormal uint64 countReadWithSharedLocks uint64 countReadHighPriority uint64 countReadNoInsert uint64 countReadExternal uint64 countWriteAllowWrite uint64 countWriteConcurrentInsert uint64 countWriteLowPriority uint64 countWriteNormal uint64 countWriteExternal uint64 timeReadNormal uint64 timeReadWithSharedLocks uint64 timeReadHighPriority uint64 timeReadNoInsert uint64 timeReadExternal uint64 timeWriteAllowWrite uint64 timeWriteConcurrentInsert uint64 timeWriteLowPriority uint64 timeWriteNormal uint64 timeWriteExternal uint64 ) for perfSchemaTableLockWaitsRows.Next() { if err := perfSchemaTableLockWaitsRows.Scan( &objectSchema, &objectName, &countReadNormal, &countReadWithSharedLocks, &countReadHighPriority, &countReadNoInsert, &countReadExternal, &countWriteAllowWrite, &countWriteConcurrentInsert, &countWriteLowPriority, &countWriteNormal, &countWriteExternal, &timeReadNormal, &timeReadWithSharedLocks, &timeReadHighPriority, &timeReadNoInsert, &timeReadExternal, &timeWriteAllowWrite, &timeWriteConcurrentInsert, &timeWriteLowPriority, &timeWriteNormal, &timeWriteExternal, ); err != nil { return err } ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countReadNormal), objectSchema, objectName, "read_normal", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countReadWithSharedLocks), objectSchema, objectName, "read_with_shared_locks", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countReadHighPriority), objectSchema, objectName, "read_high_priority", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countReadNoInsert), objectSchema, objectName, "read_no_insert", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countWriteNormal), objectSchema, objectName, "write_normal", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countWriteAllowWrite), objectSchema, objectName, "write_allow_write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countWriteConcurrentInsert), objectSchema, objectName, "write_concurrent_insert", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsDesc, prometheus.CounterValue, float64(countWriteLowPriority), objectSchema, objectName, "write_low_priority", ) ch <- prometheus.MustNewConstMetric( performanceSchemaExternalTableLockWaitsDesc, prometheus.CounterValue, float64(countReadExternal), objectSchema, objectName, "read", ) ch <- prometheus.MustNewConstMetric( performanceSchemaExternalTableLockWaitsDesc, prometheus.CounterValue, float64(countWriteExternal), objectSchema, objectName, "write", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeReadNormal)/picoSeconds, objectSchema, objectName, "read_normal", ) ch <- prometheus.MustNewConstMetric( performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, 
float64(timeReadWithSharedLocks)/picoSeconds,
			objectSchema, objectName, "read_with_shared_locks",
		)
		ch <- prometheus.MustNewConstMetric(
			performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeReadHighPriority)/picoSeconds,
			objectSchema, objectName, "read_high_priority",
		)
		ch <- prometheus.MustNewConstMetric(
			performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeReadNoInsert)/picoSeconds,
			objectSchema, objectName, "read_no_insert",
		)
		ch <- prometheus.MustNewConstMetric(
			performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeWriteNormal)/picoSeconds,
			objectSchema, objectName, "write_normal",
		)
		ch <- prometheus.MustNewConstMetric(
			performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeWriteAllowWrite)/picoSeconds,
			objectSchema, objectName, "write_allow_write",
		)
		ch <- prometheus.MustNewConstMetric(
			performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeWriteConcurrentInsert)/picoSeconds,
			objectSchema, objectName, "write_concurrent_insert",
		)
		ch <- prometheus.MustNewConstMetric(
			performanceSchemaSQLTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeWriteLowPriority)/picoSeconds,
			objectSchema, objectName, "write_low_priority",
		)
		ch <- prometheus.MustNewConstMetric(
			performanceSchemaExternalTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeReadExternal)/picoSeconds,
			objectSchema, objectName, "read",
		)
		ch <- prometheus.MustNewConstMetric(
			performanceSchemaExternalTableLockWaitsTimeDesc, prometheus.CounterValue, float64(timeWriteExternal)/picoSeconds,
			objectSchema, objectName, "write",
		)
	}
	return nil
}

// check interface
var _ Scraper = ScrapePerfTableLockWaits{}
mysqld_exporter-0.15.0/collector/scraper.go000066400000000000000000000024541444546573200210350ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package collector

import (
	"context"
	"database/sql"

	"github.com/go-kit/log"
	_ "github.com/go-sql-driver/mysql"
	"github.com/prometheus/client_golang/prometheus"
)

// Scraper is the minimal interface that lets you add new Prometheus metrics to mysqld_exporter.
type Scraper interface {
	// Name of the Scraper. Should be unique.
	Name() string

	// Help describes the role of the Scraper.
	// Example: "Collect from SHOW ENGINE INNODB STATUS"
	Help() string

	// Version of MySQL from which scraper is available.
	Version() float64

	// Scrape collects data from database connection and sends it over channel as prometheus metric.
	Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error
}
mysqld_exporter-0.15.0/collector/slave_hosts.go000066400000000000000000000074651444546573200217330ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `SHOW SLAVE HOSTS`. package collector import ( "context" "database/sql" "github.com/go-kit/log" "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" ) const ( // slavehosts is the Metric subsystem we use. slavehosts = "slave_hosts" // slaveHostsQuery is the query used to fetch information about the // replicas currently registered with this server. slaveHostsQuery = "SHOW SLAVE HOSTS" ) // Metric descriptors. var ( // Note: BuildFQName is called with the `heartbeat` subsystem constant here, so the // exported name is mysql_heartbeat_mysql_slave_hosts_info; changing the subsystem // would rename an already-exported metric. SlaveHostsInfo = prometheus.NewDesc( prometheus.BuildFQName(namespace, heartbeat, "mysql_slave_hosts_info"), "Information about running slaves", []string{"server_id", "slave_host", "port", "master_id", "slave_uuid"}, nil, ) ) // ScrapeSlaveHosts scrapes metrics about the replicating slaves. type ScrapeSlaveHosts struct{} // Name of the Scraper. Should be unique. func (ScrapeSlaveHosts) Name() string { return slavehosts } // Help describes the role of the Scraper. func (ScrapeSlaveHosts) Help() string { return "Scrape information from 'SHOW SLAVE HOSTS'" } // Version of MySQL from which the scraper is available. func (ScrapeSlaveHosts) Version() float64 { return 5.1 } // Scrape collects data from the database connection and sends it over the channel as Prometheus metrics. func (ScrapeSlaveHosts) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { slaveHostsRows, err := db.QueryContext(ctx, slaveHostsQuery) if err != nil { return err } defer slaveHostsRows.Close() // Fields of a result row. var serverId string var host string var port string var rrrOrMasterId string var slaveUuidOrMasterId string // Depends on the version of MySQL being scraped var masterId string var slaveUuid string columnNames, err := slaveHostsRows.Columns() if err != nil { return err } for slaveHostsRows.Next() { // Newer versions of mysql have the following // Server_id, Host, Port, Master_id, Slave_UUID // Older versions of mysql have the following // Server_id, Host, Port, Rpl_recovery_rank, Master_id // MySQL 5.5 and MariaDB 10.5 have the following // Server_id, Host, Port, Master_id if len(columnNames) == 5 { err = slaveHostsRows.Scan(&serverId, &host, &port, &rrrOrMasterId, &slaveUuidOrMasterId) } else { err = slaveHostsRows.Scan(&serverId, &host, &port, &rrrOrMasterId) } if err != nil { return err } // If a Slave_UUID or Rpl_recovery_rank field is present if len(columnNames) == 5 { // Check to see if slaveUuidOrMasterId resembles a UUID or not // to find out if we are using an old version of MySQL if _, err = uuid.Parse(slaveUuidOrMasterId); err != nil { // We are running an older version of MySQL with no slave UUID slaveUuid = "" masterId = slaveUuidOrMasterId } else { // We are running a more recent version of MySQL slaveUuid = slaveUuidOrMasterId masterId = rrrOrMasterId } } else { slaveUuid = "" masterId = rrrOrMasterId } ch <- prometheus.MustNewConstMetric( SlaveHostsInfo, prometheus.GaugeValue, 1, serverId, host, port, masterId, slaveUuid, ) } return nil } // check interface var _ Scraper = ScrapeSlaveHosts{}
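The `Scraper` interface above is the exporter's whole extension surface: a collector needs only a name, help text, a minimum MySQL version, and a `Scrape` method that writes const metrics to the channel. Below is a minimal sketch of a custom implementation; the query, subsystem, and metric name are illustrative placeholders, not a collector that ships with the exporter.

```go
package collector

import (
	"context"
	"database/sql"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
)

// ScrapeExample is a hypothetical collector used only to illustrate the
// Scraper interface; it is not part of mysqld_exporter.
type ScrapeExample struct{}

// Name of the Scraper. Should be unique.
func (ScrapeExample) Name() string { return "example" }

// Help describes the role of the Scraper.
func (ScrapeExample) Help() string { return "Collect a single illustrative value" }

// Version of MySQL from which the scraper is available.
func (ScrapeExample) Version() float64 { return 5.1 }

// Scrape runs one query and emits the result as a gauge.
func (ScrapeExample) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
	var value float64
	if err := db.QueryRowContext(ctx, "SELECT COUNT(*) FROM information_schema.tables").Scan(&value); err != nil {
		return err
	}
	ch <- prometheus.MustNewConstMetric(
		prometheus.NewDesc(
			prometheus.BuildFQName(namespace, "example", "tables_total"),
			"Illustrative count of tables visible to the exporter user.",
			nil, nil,
		),
		prometheus.GaugeValue, value,
	)
	return nil
}

// check interface
var _ Scraper = ScrapeExample{}
```

Like the collectors in this package, the sketch keeps `Scrape` side-effect free apart from the channel sends, so a failed query simply surfaces as a scrape error.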
mysqld_exporter-0.15.0/collector/slave_hosts_test.go000066400000000000000000000123371444546573200227700ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapeSlaveHostsOldFormat(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"Server_id", "Host", "Port", "Rpl_recovery_rank", "Master_id"} rows := sqlmock.NewRows(columns). AddRow("380239978", "backup_server_1", "0", "1", "192168011"). AddRow("11882498", "backup_server_2", "0", "1", "192168011") mock.ExpectQuery(sanitizeQuery("SHOW SLAVE HOSTS")).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeSlaveHosts{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{"server_id": "380239978", "slave_host": "backup_server_1", "port": "0", "master_id": "192168011", "slave_uuid": ""}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"server_id": "11882498", "slave_host": "backup_server_2", "port": "0", "master_id": "192168011", "slave_uuid": ""}, value: 1, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } func TestScrapeSlaveHostsNewFormat(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"Server_id", "Host", "Port", "Master_id", "Slave_UUID"} rows := sqlmock.NewRows(columns). AddRow("192168010", "iconnect2", "3306", "192168011", "14cb6624-7f93-11e0-b2c0-c80aa9429562").
AddRow("1921680101", "athena", "3306", "192168011", "07af4990-f41f-11df-a566-7ac56fdaf645") mock.ExpectQuery(sanitizeQuery("SHOW SLAVE HOSTS")).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeSlaveHosts{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{"server_id": "192168010", "slave_host": "iconnect2", "port": "3306", "master_id": "192168011", "slave_uuid": "14cb6624-7f93-11e0-b2c0-c80aa9429562"}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"server_id": "1921680101", "slave_host": "athena", "port": "3306", "master_id": "192168011", "slave_uuid": "07af4990-f41f-11df-a566-7ac56fdaf645"}, value: 1, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } func TestScrapeSlaveHostsWithoutSlaveUuid(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"Server_id", "Host", "Port", "Master_id"} rows := sqlmock.NewRows(columns). AddRow("192168010", "iconnect2", "3306", "192168012"). AddRow("1921680101", "athena", "3306", "192168012") mock.ExpectQuery(sanitizeQuery("SHOW SLAVE HOSTS")).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeSlaveHosts{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{"server_id": "192168010", "slave_host": "iconnect2", "port": "3306", "master_id": "192168012", "slave_uuid": ""}, value: 1, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"server_id": "1921680101", "slave_host": "athena", "port": "3306", "master_id": "192168012", "slave_uuid": ""}, value: 1, metricType: dto.MetricType_GAUGE}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled exceptions: %s", err) } } mysqld_exporter-0.15.0/collector/slave_status.go000066400000000000000000000076661444546573200221250ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Scrape `SHOW SLAVE STATUS`. package collector import ( "context" "database/sql" "fmt" "strings" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const ( // Subsystem. 
slaveStatus = "slave_status" ) var slaveStatusQueries = [2]string{"SHOW ALL SLAVES STATUS", "SHOW SLAVE STATUS"} var slaveStatusQuerySuffixes = [3]string{" NONBLOCKING", " NOLOCK", ""} func columnIndex(slaveCols []string, colName string) int { for idx := range slaveCols { if slaveCols[idx] == colName { return idx } } return -1 } func columnValue(scanArgs []interface{}, slaveCols []string, colName string) string { var columnIndex = columnIndex(slaveCols, colName) if columnIndex == -1 { return "" } return string(*scanArgs[columnIndex].(*sql.RawBytes)) } // ScrapeSlaveStatus collects from `SHOW SLAVE STATUS`. type ScrapeSlaveStatus struct{} // Name of the Scraper. Should be unique. func (ScrapeSlaveStatus) Name() string { return slaveStatus } // Help describes the role of the Scraper. func (ScrapeSlaveStatus) Help() string { return "Collect from SHOW SLAVE STATUS" } // Version of MySQL from which scraper is available. func (ScrapeSlaveStatus) Version() float64 { return 5.1 } // Scrape collects data from database connection and sends it over channel as prometheus metric. func (ScrapeSlaveStatus) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { var ( slaveStatusRows *sql.Rows err error ) // Try the both syntax for MySQL/Percona and MariaDB for _, query := range slaveStatusQueries { slaveStatusRows, err = db.QueryContext(ctx, query) if err != nil { // MySQL/Percona // Leverage lock-free SHOW SLAVE STATUS by guessing the right suffix for _, suffix := range slaveStatusQuerySuffixes { slaveStatusRows, err = db.QueryContext(ctx, fmt.Sprint(query, suffix)) if err == nil { break } } } else { // MariaDB break } } if err != nil { return err } defer slaveStatusRows.Close() slaveCols, err := slaveStatusRows.Columns() if err != nil { return err } for slaveStatusRows.Next() { // As the number of columns varies with mysqld versions, // and sql.Scan requires []interface{}, we need to create a // slice of pointers to the elements of slaveData. scanArgs := make([]interface{}, len(slaveCols)) for i := range scanArgs { scanArgs[i] = &sql.RawBytes{} } if err := slaveStatusRows.Scan(scanArgs...); err != nil { return err } masterUUID := columnValue(scanArgs, slaveCols, "Master_UUID") masterHost := columnValue(scanArgs, slaveCols, "Master_Host") channelName := columnValue(scanArgs, slaveCols, "Channel_Name") // MySQL & Percona connectionName := columnValue(scanArgs, slaveCols, "Connection_name") // MariaDB for i, col := range slaveCols { if value, ok := parseStatus(*scanArgs[i].(*sql.RawBytes)); ok { // Silently skip unparsable values. ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, slaveStatus, strings.ToLower(col)), "Generic metric from SHOW SLAVE STATUS.", []string{"master_host", "master_uuid", "channel_name", "connection_name"}, nil, ), prometheus.UntypedValue, value, masterHost, masterUUID, channelName, connectionName, ) } } } return nil } // check interface var _ Scraper = ScrapeSlaveStatus{} mysqld_exporter-0.15.0/collector/slave_status_test.go000066400000000000000000000047611444546573200231550ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapeSlaveStatus(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{"Master_Host", "Read_Master_Log_Pos", "Slave_IO_Running", "Slave_SQL_Running", "Seconds_Behind_Master"} rows := sqlmock.NewRows(columns). AddRow("127.0.0.1", "1", "Connecting", "Yes", "2") mock.ExpectQuery(sanitizeQuery("SHOW SLAVE STATUS")).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeSlaveStatus{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() counterExpected := []MetricResult{ {labels: labelMap{"channel_name": "", "connection_name": "", "master_host": "127.0.0.1", "master_uuid": ""}, value: 1, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{"channel_name": "", "connection_name": "", "master_host": "127.0.0.1", "master_uuid": ""}, value: 0, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{"channel_name": "", "connection_name": "", "master_host": "127.0.0.1", "master_uuid": ""}, value: 1, metricType: dto.MetricType_UNTYPED}, {labels: labelMap{"channel_name": "", "connection_name": "", "master_host": "127.0.0.1", "master_uuid": ""}, value: 2, metricType: dto.MetricType_UNTYPED}, } convey.Convey("Metrics comparison", t, func() { for _, expect := range counterExpected { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } mysqld_exporter-0.15.0/collector/sys.go000066400000000000000000000011751444546573200202130ustar00rootroot00000000000000// Copyright 2022 The Prometheus Authors. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector const sysSchema = "sys" mysqld_exporter-0.15.0/collector/sys_user_summary.go000066400000000000000000000134551444546573200230300ustar00rootroot00000000000000// Copyright 2022 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "context" "database/sql" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const sysUserSummaryQuery = ` SELECT user, statements, statement_latency, table_scans, file_ios, file_io_latency, current_connections, total_connections, unique_hosts, current_memory, total_memory_allocated FROM ` + sysSchema + `.x$user_summary ` var ( sysUserSummaryStatements = prometheus.NewDesc( prometheus.BuildFQName(namespace, sysSchema, "statements_total"), "The total number of statements for the user", []string{"user"}, nil) sysUserSummaryStatementLatency = prometheus.NewDesc( prometheus.BuildFQName(namespace, sysSchema, "statement_latency"), "The total wait time of timed statements for the user", []string{"user"}, nil) sysUserSummaryTableScans = prometheus.NewDesc( prometheus.BuildFQName(namespace, sysSchema, "table_scans_total"), "The total number of table scans for the user", []string{"user"}, nil) sysUserSummaryFileIOs = prometheus.NewDesc( prometheus.BuildFQName(namespace, sysSchema, "file_ios_total"), "The total number of file I/O events for the user", []string{"user"}, nil) sysUserSummaryFileIOLatency = prometheus.NewDesc( prometheus.BuildFQName(namespace, sysSchema, "file_io_seconds_total"), "The total wait time of timed file I/O events for the user", []string{"user"}, nil) sysUserSummaryCurrentConnections = prometheus.NewDesc( prometheus.BuildFQName(namespace, sysSchema, "current_connections"), "The current number of connections for the user", []string{"user"}, nil) sysUserSummaryTotalConnections = prometheus.NewDesc( prometheus.BuildFQName(namespace, sysSchema, "connections_total"), "The total number of connections for the user", []string{"user"}, nil) sysUserSummaryUniqueHosts = prometheus.NewDesc( prometheus.BuildFQName(namespace, sysSchema, "unique_hosts_total"), "The number of distinct hosts from which connections for the user have originated", []string{"user"}, nil) sysUserSummaryCurrentMemory = prometheus.NewDesc( prometheus.BuildFQName(namespace, sysSchema, "current_memory_bytes"), "The current amount of allocated memory for the user", []string{"user"}, nil) sysUserSummaryTotalMemoryAllocated = prometheus.NewDesc( prometheus.BuildFQName(namespace, sysSchema, "memory_allocated_bytes_total"), "The total amount of allocated memory for the user", []string{"user"}, nil) ) type ScrapeSysUserSummary struct{} // Name of the Scraper. Should be unique. func (ScrapeSysUserSummary) Name() string { return sysSchema + ".user_summary" } // Help describes the role of the Scraper. func (ScrapeSysUserSummary) Help() string { return "Collect per user metrics from sys.x$user_summary. See https://dev.mysql.com/doc/refman/5.7/en/sys-user-summary.html for details" } // Version of MySQL from which the scraper is available.
func (ScrapeSysUserSummary) Version() float64 { return 5.7 } // Scrape the information from sys.user_summary, creating a metric for each value of each row, labeled with the user func (ScrapeSysUserSummary) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error { userSummaryRows, err := db.QueryContext(ctx, sysUserSummaryQuery) if err != nil { return err } defer userSummaryRows.Close() var ( user string statements uint64 statement_latency float64 table_scans uint64 file_ios uint64 file_io_latency float64 current_connections uint64 total_connections uint64 unique_hosts uint64 current_memory uint64 total_memory_allocated uint64 ) for userSummaryRows.Next() { err = userSummaryRows.Scan( &user, &statements, &statement_latency, &table_scans, &file_ios, &file_io_latency, ¤t_connections, &total_connections, &unique_hosts, ¤t_memory, &total_memory_allocated, ) if err != nil { return err } ch <- prometheus.MustNewConstMetric(sysUserSummaryStatements, prometheus.CounterValue, float64(statements), user) ch <- prometheus.MustNewConstMetric(sysUserSummaryStatementLatency, prometheus.CounterValue, float64(statement_latency)/picoSeconds, user) ch <- prometheus.MustNewConstMetric(sysUserSummaryTableScans, prometheus.CounterValue, float64(table_scans), user) ch <- prometheus.MustNewConstMetric(sysUserSummaryFileIOs, prometheus.CounterValue, float64(file_ios), user) ch <- prometheus.MustNewConstMetric(sysUserSummaryFileIOLatency, prometheus.CounterValue, float64(file_io_latency)/picoSeconds, user) ch <- prometheus.MustNewConstMetric(sysUserSummaryCurrentConnections, prometheus.GaugeValue, float64(current_connections), user) ch <- prometheus.MustNewConstMetric(sysUserSummaryTotalConnections, prometheus.CounterValue, float64(total_connections), user) ch <- prometheus.MustNewConstMetric(sysUserSummaryUniqueHosts, prometheus.CounterValue, float64(unique_hosts), user) ch <- prometheus.MustNewConstMetric(sysUserSummaryCurrentMemory, prometheus.GaugeValue, float64(current_memory), user) ch <- prometheus.MustNewConstMetric(sysUserSummaryTotalMemoryAllocated, prometheus.CounterValue, float64(total_memory_allocated), user) } return nil } var _ Scraper = ScrapeSysUserSummary{} mysqld_exporter-0.15.0/collector/sys_user_summary_test.go000066400000000000000000000062351444546573200240670ustar00rootroot00000000000000// Copyright 2022 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package collector import ( "context" "database/sql/driver" "regexp" "strconv" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" ) func TestScrapeSysUserSummary(t *testing.T) { db, mock, err := sqlmock.New() if err != nil { t.Fatalf("error opening a stub database connection: %s", err) } defer db.Close() columns := []string{ "user", "statements", "statement_latency", "table_scans", "file_ios", "file_io_latency", "current_connections", "total_connections", "unique_hosts", "current_memory", "total_memory_allocated", } rows := sqlmock.NewRows(columns) queryResults := [][]driver.Value{ { "user1", "110", "120", "140", "150", "160", "170", "180", "190", "110", "111", }, { "user2", "210", "220", "240", "250", "260", "270", "280", "290", "210", "211", }, } expectedMetrics := []MetricResult{} // Register the query results with mock SQL driver and assemble expected metric results list for _, row := range queryResults { rows.AddRow(row...) user := row[0] for i, metricsValue := range row { if i == 0 { continue } metricType := dto.MetricType_COUNTER // Current Connections and Current Memory are gauges if i == 6 || i == 9 { metricType = dto.MetricType_GAUGE } value, err := strconv.ParseFloat(metricsValue.(string), 64) if err != nil { t.Errorf("Failed to parse result value as float64: %+v", err) } // Statement latency & IO latency are latencies in picoseconds, convert them to seconds if i == 2 || i == 5 { value = value / picoSeconds } expectedMetrics = append(expectedMetrics, MetricResult{ labels: labelMap{"user": user.(string)}, value: value, metricType: metricType, }) } } mock.ExpectQuery(sanitizeQuery(regexp.QuoteMeta(sysUserSummaryQuery))).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { if err = (ScrapeSysUserSummary{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil { t.Errorf("error calling function on test: %s", err) } close(ch) }() // Ensure metrics look OK convey.Convey("Metrics comparison", t, func() { for _, expect := range expectedMetrics { got := readMetric(<-ch) convey.So(got, convey.ShouldResemble, expect) } }) // Ensure all SQL queries were executed if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } mysqld_exporter-0.15.0/config/000077500000000000000000000000001444546573200163215ustar00rootroot00000000000000mysqld_exporter-0.15.0/config/config.go000066400000000000000000000143301444546573200201160ustar00rootroot00000000000000// Copyright 2022 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License.
package config import ( "crypto/tls" "crypto/x509" "fmt" "net" "os" "strconv" "strings" "sync" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/go-sql-driver/mysql" "github.com/prometheus/client_golang/prometheus" "gopkg.in/ini.v1" ) var ( configReloadSuccess = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "mysqld_exporter", Name: "config_last_reload_successful", Help: "Mysqld exporter config loaded successfully.", }) configReloadSeconds = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "mysqld_exporter", Name: "config_last_reload_success_timestamp_seconds", Help: "Timestamp of the last successful configuration reload.", }) cfg *ini.File opts = ini.LoadOptions{ // Do not error on nonexistent file to allow empty string as filename input Loose: true, // MySQL ini file can have boolean keys. AllowBooleanKeys: true, } err error ) type Config struct { Sections map[string]MySqlConfig } type MySqlConfig struct { User string `ini:"user"` Password string `ini:"password"` Host string `ini:"host"` Port int `ini:"port"` Socket string `ini:"socket"` SslCa string `ini:"ssl-ca"` SslCert string `ini:"ssl-cert"` SslKey string `ini:"ssl-key"` // Note: the misspelled key name below is kept as-is; renaming it would break existing config files. TlsInsecureSkipVerify bool `ini:"ssl-skip-verfication"` Tls string `ini:"tls"` } type MySqlConfigHandler struct { sync.RWMutex TlsInsecureSkipVerify bool Config *Config } func (ch *MySqlConfigHandler) GetConfig() *Config { ch.RLock() defer ch.RUnlock() return ch.Config } func (ch *MySqlConfigHandler) ReloadConfig(filename string, mysqldAddress string, mysqldUser string, tlsInsecureSkipVerify bool, logger log.Logger) error { var host, port string defer func() { if err != nil { configReloadSuccess.Set(0) } else { configReloadSuccess.Set(1) configReloadSeconds.SetToCurrentTime() } }() if cfg, err = ini.LoadSources( opts, []byte("[client]\npassword = ${MYSQLD_EXPORTER_PASSWORD}\n"), filename, ); err != nil { return fmt.Errorf("failed to load %s: %w", filename, err) } if host, port, err = net.SplitHostPort(mysqldAddress); err != nil { return fmt.Errorf("failed to parse address: %w", err) } if clientSection := cfg.Section("client"); clientSection != nil { if cfgHost := clientSection.Key("host"); cfgHost.String() == "" { cfgHost.SetValue(host) } if cfgPort := clientSection.Key("port"); cfgPort.String() == "" { cfgPort.SetValue(port) } if cfgUser := clientSection.Key("user"); cfgUser.String() == "" { cfgUser.SetValue(mysqldUser) } } cfg.ValueMapper = os.ExpandEnv config := &Config{} m := make(map[string]MySqlConfig) for _, sec := range cfg.Sections() { sectionName := sec.Name() if sectionName == "DEFAULT" { continue } mysqlcfg := &MySqlConfig{ TlsInsecureSkipVerify: tlsInsecureSkipVerify, } // Parse and validation failures are non-fatal: log, skip the section, and keep going. // Local error values are used so a failed section cannot leak state into the next // iteration or into the deferred reload-status gauge. if err := sec.StrictMapTo(mysqlcfg); err != nil { level.Error(logger).Log("msg", "failed to parse config", "section", sectionName, "err", err) continue } if err := mysqlcfg.validateConfig(); err != nil { level.Error(logger).Log("msg", "failed to validate config", "section", sectionName, "err", err) continue } m[sectionName] = *mysqlcfg } config.Sections = m if len(config.Sections) == 0 { err = fmt.Errorf("no configuration found") return err } ch.Lock() ch.Config = config ch.Unlock() return nil } func (m MySqlConfig) validateConfig() error { if m.User == "" { return fmt.Errorf("no user specified in section or parent") } return nil } func (m MySqlConfig) FormDSN(target string) (string, error) { config := mysql.NewConfig() config.User = m.User config.Passwd = m.Password config.Net = "tcp" if target == "" { if m.Socket == "" { host := "127.0.0.1" if m.Host != "" { host = m.Host } port := "3306" if m.Port != 0 { port = strconv.Itoa(m.Port) } config.Addr = net.JoinHostPort(host, port) } else { config.Net = "unix" config.Addr = m.Socket } } else if prefix := "unix://"; strings.HasPrefix(target, prefix) { config.Net = "unix" config.Addr = target[len(prefix):] } else { if _, _, err := net.SplitHostPort(target); err != nil { return "", fmt.Errorf("failed to parse target: %s", err) } config.Addr = target } if m.TlsInsecureSkipVerify { config.TLSConfig = "skip-verify" } else { config.TLSConfig = m.Tls if m.SslCa != "" { if err := m.CustomizeTLS(); err != nil { err = fmt.Errorf("failed to register a custom TLS configuration for mysql dsn: %w", err) return "", err } config.TLSConfig = "custom" } } return config.FormatDSN(), nil } func (m MySqlConfig) CustomizeTLS() error { var tlsCfg tls.Config caBundle := x509.NewCertPool() pemCA, err := os.ReadFile(m.SslCa) if err != nil { return err } if ok := caBundle.AppendCertsFromPEM(pemCA); ok { tlsCfg.RootCAs = caBundle } else { return fmt.Errorf("failed to parse pem-encoded CA certificates from %s", m.SslCa) } if m.SslCert != "" && m.SslKey != "" { certPairs := make([]tls.Certificate, 0, 1) keypair, err := tls.LoadX509KeyPair(m.SslCert, m.SslKey) if err != nil { return fmt.Errorf("failed to parse pem-encoded SSL cert %s or SSL key %s: %w", m.SslCert, m.SslKey, err) } certPairs = append(certPairs, keypair) tlsCfg.Certificates = certPairs } tlsCfg.InsecureSkipVerify = m.TlsInsecureSkipVerify return mysql.RegisterTLSConfig("custom", &tlsCfg) } mysqld_exporter-0.15.0/config/config_test.go000066400000000000000000000167361444546573200211670ustar00rootroot00000000000000// Copyright 2022 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
package config import ( "fmt" "os" "testing" "github.com/go-kit/log" "github.com/smartystreets/goconvey/convey" ) func TestValidateConfig(t *testing.T) { convey.Convey("Working config validation", t, func() { c := MySqlConfigHandler{ Config: &Config{}, } if err := c.ReloadConfig("testdata/client.cnf", "localhost:3306", "", true, log.NewNopLogger()); err != nil { t.Error(err) } convey.Convey("Valid configuration", func() { cfg := c.GetConfig() convey.So(cfg.Sections, convey.ShouldContainKey, "client") convey.So(cfg.Sections, convey.ShouldContainKey, "client.server1") section, ok := cfg.Sections["client"] convey.So(ok, convey.ShouldBeTrue) convey.So(section.User, convey.ShouldEqual, "root") convey.So(section.Password, convey.ShouldEqual, "abc") childSection, ok := cfg.Sections["client.server1"] convey.So(ok, convey.ShouldBeTrue) convey.So(childSection.User, convey.ShouldEqual, "test") convey.So(childSection.Password, convey.ShouldEqual, "foo") }) convey.Convey("False on non-existent section", func() { cfg := c.GetConfig() _, ok := cfg.Sections["fakeclient"] convey.So(ok, convey.ShouldBeFalse) }) }) convey.Convey("Inherit from parent section", t, func() { c := MySqlConfigHandler{ Config: &Config{}, } if err := c.ReloadConfig("testdata/child_client.cnf", "localhost:3306", "", true, log.NewNopLogger()); err != nil { t.Error(err) } cfg := c.GetConfig() section, _ := cfg.Sections["client.server1"] convey.So(section.Password, convey.ShouldEqual, "abc") }) convey.Convey("Environment variable / CLI flags", t, func() { c := MySqlConfigHandler{ Config: &Config{}, } os.Setenv("MYSQLD_EXPORTER_PASSWORD", "supersecretpassword") if err := c.ReloadConfig("", "testhost:5000", "testuser", true, log.NewNopLogger()); err != nil { t.Error(err) } cfg := c.GetConfig() section := cfg.Sections["client"] convey.So(section.Host, convey.ShouldEqual, "testhost") convey.So(section.Port, convey.ShouldEqual, 5000) convey.So(section.User, convey.ShouldEqual, "testuser") convey.So(section.Password, convey.ShouldEqual, "supersecretpassword") }) convey.Convey("Environment variable / CLI flags error without port", t, func() { c := MySqlConfigHandler{ Config: &Config{}, } os.Setenv("MYSQLD_EXPORTER_PASSWORD", "supersecretpassword") err := c.ReloadConfig("", "testhost", "testuser", true, log.NewNopLogger()) convey.So( err, convey.ShouldBeError, ) }) convey.Convey("Config file precedence over environment variables", t, func() { c := MySqlConfigHandler{ Config: &Config{}, } os.Setenv("MYSQLD_EXPORTER_PASSWORD", "supersecretpassword") if err := c.ReloadConfig("testdata/client.cnf", "localhost:3306", "fakeuser", true, log.NewNopLogger()); err != nil { t.Error(err) } cfg := c.GetConfig() section := cfg.Sections["client"] convey.So(section.User, convey.ShouldEqual, "root") convey.So(section.Password, convey.ShouldEqual, "abc") }) convey.Convey("Client without user", t, func() { c := MySqlConfigHandler{ Config: &Config{}, } os.Clearenv() err := c.ReloadConfig("testdata/missing_user.cnf", "localhost:3306", "", true, log.NewNopLogger()) convey.So( err, convey.ShouldResemble, fmt.Errorf("no configuration found"), ) }) convey.Convey("Client without password", t, func() { c := MySqlConfigHandler{ Config: &Config{}, } os.Clearenv() if err := c.ReloadConfig("testdata/missing_password.cnf", "localhost:3306", "", true, log.NewNopLogger()); err != nil { t.Error(err) } cfg := c.GetConfig() section := cfg.Sections["client"] convey.So(section.User, convey.ShouldEqual, "abc") convey.So(section.Password, convey.ShouldEqual, "") }) } func 
TestFormDSN(t *testing.T) { var ( c = MySqlConfigHandler{ Config: &Config{}, } err error dsn string ) convey.Convey("Host exporter dsn", t, func() { if err := c.ReloadConfig("testdata/client.cnf", "localhost:3306", "", false, log.NewNopLogger()); err != nil { t.Error(err) } convey.Convey("Default Client", func() { cfg := c.GetConfig() section := cfg.Sections["client"] if dsn, err = section.FormDSN(""); err != nil { t.Error(err) } convey.So(dsn, convey.ShouldEqual, "root:abc@tcp(server2:3306)/") }) convey.Convey("Target specific with explicit port", func() { cfg := c.GetConfig() section := cfg.Sections["client.server1"] if dsn, err = section.FormDSN("server1:5000"); err != nil { t.Error(err) } convey.So(dsn, convey.ShouldEqual, "test:foo@tcp(server1:5000)/") }) convey.Convey("UNIX domain socket", func() { cfg := c.GetConfig() section := cfg.Sections["client.server1"] if dsn, err = section.FormDSN("unix:///run/mysqld/mysqld.sock"); err != nil { t.Error(err) } convey.So(dsn, convey.ShouldEqual, "test:foo@unix(/run/mysqld/mysqld.sock)/") }) }) } func TestFormDSNWithSslSkipVerify(t *testing.T) { var ( c = MySqlConfigHandler{ Config: &Config{}, } err error dsn string ) convey.Convey("Host exporter dsn with tls skip verify", t, func() { if err := c.ReloadConfig("testdata/client.cnf", "localhost:3306", "", true, log.NewNopLogger()); err != nil { t.Error(err) } convey.Convey("Default Client", func() { cfg := c.GetConfig() section := cfg.Sections["client"] if dsn, err = section.FormDSN(""); err != nil { t.Error(err) } convey.So(dsn, convey.ShouldEqual, "root:abc@tcp(server2:3306)/?tls=skip-verify") }) convey.Convey("Target specific with explicit port", func() { cfg := c.GetConfig() section := cfg.Sections["client.server1"] if dsn, err = section.FormDSN("server1:5000"); err != nil { t.Error(err) } convey.So(dsn, convey.ShouldEqual, "test:foo@tcp(server1:5000)/?tls=skip-verify") }) }) } func TestFormDSNWithCustomTls(t *testing.T) { var ( c = MySqlConfigHandler{ Config: &Config{}, } err error dsn string ) convey.Convey("Host exporter dsn with custom tls", t, func() { if err := c.ReloadConfig("testdata/client_custom_tls.cnf", "localhost:3306", "", false, log.NewNopLogger()); err != nil { t.Error(err) } convey.Convey("Target tls enabled", func() { cfg := c.GetConfig() section := cfg.Sections["client_tls_true"] if dsn, err = section.FormDSN(""); err != nil { t.Error(err) } convey.So(dsn, convey.ShouldEqual, "usr:pwd@tcp(server2:3306)/?tls=true") }) convey.Convey("Target tls preferred", func() { cfg := c.GetConfig() section := cfg.Sections["client_tls_preferred"] if dsn, err = section.FormDSN(""); err != nil { t.Error(err) } convey.So(dsn, convey.ShouldEqual, "usr:pwd@tcp(server3:3306)/?tls=preferred") }) convey.Convey("Target tls skip-verify", func() { cfg := c.GetConfig() section := cfg.Sections["client_tls_skip_verify"] if dsn, err = section.FormDSN(""); err != nil { t.Error(err) } convey.So(dsn, convey.ShouldEqual, "usr:pwd@tcp(server3:3306)/?tls=skip-verify") }) }) } mysqld_exporter-0.15.0/config/testdata/000077500000000000000000000000001444546573200201325ustar00rootroot00000000000000mysqld_exporter-0.15.0/config/testdata/child_client.cnf000066400000000000000000000001011444546573200232330ustar00rootroot00000000000000[client] user = root password = abc [client.server1] user = root mysqld_exporter-0.15.0/config/testdata/client.cnf000066400000000000000000000001371444546573200221010ustar00rootroot00000000000000[client] user = root password = abc host = server2 [client.server1] user = test password = foo 
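The testdata files above show the my.cnf layout the handler expects. Below is a minimal sketch of wiring `MySqlConfigHandler` to `FormDSN`; the `main` package, the file path, and the target address are illustrative, not part of the exporter.

```go
package main

import (
	"fmt"

	"github.com/go-kit/log"
	"github.com/prometheus/mysqld_exporter/config"
)

func main() {
	handler := config.MySqlConfigHandler{Config: &config.Config{}}
	// ReloadConfig merges the file with the address/user fallbacks and the
	// MYSQLD_EXPORTER_PASSWORD environment variable.
	if err := handler.ReloadConfig("config/testdata/client.cnf", "localhost:3306", "exporter", false, log.NewNopLogger()); err != nil {
		fmt.Println("reload failed:", err)
		return
	}
	// Sections are keyed by ini section name, e.g. "client" or "client.server1".
	section := handler.GetConfig().Sections["client.server1"]
	// A non-empty target overrides the section's host/port; an empty target
	// falls back to the section's host/port or socket.
	dsn, err := section.FormDSN("server1:3306")
	if err != nil {
		fmt.Println("forming DSN failed:", err)
		return
	}
	fmt.Println(dsn) // test:foo@tcp(server1:3306)/
}
```

This mirrors what the `TestFormDSN` cases above assert, so the printed DSN takes the same `test:foo@tcp(...)/` form as in the tests.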
mysqld_exporter-0.15.0/config/testdata/client_custom_tls.cnf000066400000000000000000000004101444546573200243470ustar00rootroot00000000000000[client_tls_true] host = server2 port = 3306 user = usr password = pwd tls=true [client_tls_preferred] host = server3 port = 3306 user = usr password = pwd tls=preferred [client_tls_skip_verify] host = server3 port = 3306 user = usr password = pwd tls=skip-verify mysqld_exporter-0.15.0/config/testdata/missing_password.cnf000066400000000000000000000000241444546573200242110ustar00rootroot00000000000000[client] user = abc mysqld_exporter-0.15.0/config/testdata/missing_user.cnf000066400000000000000000000000301444546573200233220ustar00rootroot00000000000000[client] password = abc mysqld_exporter-0.15.0/go.mod000066400000000000000000000032671444546573200161720ustar00rootroot00000000000000module github.com/prometheus/mysqld_exporter go 1.18 require ( github.com/DATA-DOG/go-sqlmock v1.5.0 github.com/alecthomas/kingpin/v2 v2.3.2 github.com/go-kit/log v0.2.1 github.com/go-sql-driver/mysql v1.7.1 github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.0 github.com/prometheus/client_golang v1.15.1 github.com/prometheus/client_model v0.4.0 github.com/prometheus/common v0.44.0 github.com/prometheus/exporter-toolkit v0.10.0 github.com/smartystreets/goconvey v1.8.0 gopkg.in/ini.v1 v1.67.0 ) require ( github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/gopherjs/gopherjs v1.17.2 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/kr/text v0.2.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/smartystreets/assertions v1.13.1 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect golang.org/x/crypto v0.8.0 // indirect golang.org/x/net v0.10.0 // indirect golang.org/x/oauth2 v0.8.0 // indirect golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.8.0 // indirect golang.org/x/text v0.9.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) mysqld_exporter-0.15.0/go.sum000066400000000000000000000214411444546573200162110ustar00rootroot00000000000000github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU= github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.4.0 
h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/exporter-toolkit v0.10.0 h1:yOAzZTi4M22ZzVxD+fhy1URTuNRj/36uQJJ5S8IPza8= github.com/prometheus/exporter-toolkit v0.10.0/go.mod h1:+sVFzuvV5JDyw+Ih6p3zFxZNVnKQa3x5qPmDSiPu4ZY= github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU= github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY= github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w= github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/protobuf 
v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= mysqld_exporter-0.15.0/mysqld-mixin/000077500000000000000000000000001444546573200175075ustar00rootroot00000000000000mysqld_exporter-0.15.0/mysqld-mixin/.gitignore000066400000000000000000000000501444546573200214720ustar00rootroot00000000000000/alerts.yaml /rules.yaml dashboards_out mysqld_exporter-0.15.0/mysqld-mixin/Makefile000066400000000000000000000010641444546573200211500ustar00rootroot00000000000000JSONNET_FMT := jsonnetfmt -n 2 --max-blank-lines 2 --string-style s --comment-style s default: build all: fmt lint build clean fmt: find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \ xargs -n 1 -- $(JSONNET_FMT) -i lint: find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \ while read f; do \ $(JSONNET_FMT) "$$f" | diff -u "$$f" -; \ done mixtool lint mixin.libsonnet build: mixtool generate all mixin.libsonnet clean: rm -rf dashboards_out alerts.yaml rules.yaml mysqld_exporter-0.15.0/mysqld-mixin/README.md000066400000000000000000000014501444546573200207660ustar00rootroot00000000000000# MySQLd Mixin The MySQLd Mixin is a set of configurable, reusable, and extensible alerts and dashboards based on the metrics exported by the MySQLd Exporter. The mixin creates recording and alerting rules for Prometheus and suitable dashboard descriptions for Grafana. To use them, you need to have `mixtool` and `jsonnetfmt` installed. If you have a working Go development environment, it's easiest to run the following: ```bash $ go install github.com/monitoring-mixins/mixtool/cmd/mixtool@latest $ go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest ``` You can then build the Prometheus rules files `alerts.yaml` and `rules.yaml` and a directory `dashboards_out` with the JSON dashboard files for Grafana: ```bash $ make build ``` For more advanced uses of mixins, see https://github.com/monitoring-mixins/docs. mysqld_exporter-0.15.0/mysqld-mixin/alerts/000077500000000000000000000000001444546573200210015ustar00rootroot00000000000000mysqld_exporter-0.15.0/mysqld-mixin/alerts/galera.yaml000066400000000000000000000051341444546573200231230ustar00rootroot00000000000000### # Sample prometheus rules/alerts for mysqld. # # NOTE: Please review these carefully as thresholds and behavior may not meet # your SLOs or labels. # ### groups: - name: GaleraAlerts rules: - alert: MySQLGaleraNotReady expr: mysql_global_status_wsrep_ready != 1 for: 5m labels: severity: warning annotations: description: '{{$labels.job}} on {{$labels.instance}} is not ready.' summary: Galera cluster node not ready.
- alert: MySQLGaleraOutOfSync expr: (mysql_global_status_wsrep_local_state != 4 and mysql_global_variables_wsrep_desync == 0) for: 5m labels: severity: warning annotations: description: '{{$labels.job}} on {{$labels.instance}} is not in sync ({{$value}} != 4).' summary: Galera cluster node out of sync. - alert: MySQLGaleraDonorFallingBehind expr: (mysql_global_status_wsrep_local_state == 2 and mysql_global_status_wsrep_local_recv_queue > 100) for: 5m labels: severity: warning annotations: description: '{{$labels.job}} on {{$labels.instance}} is a donor (hotbackup) and is falling behind (queue size {{$value}}).' summary: XtraDB cluster donor node falling behind. - alert: MySQLReplicationNotRunning expr: mysql_slave_status_slave_io_running == 0 or mysql_slave_status_slave_sql_running == 0 for: 2m labels: severity: critical annotations: description: "Replication on {{$labels.instance}} (IO or SQL) has been down for more than 2 minutes." summary: Replication is not running. - alert: MySQLReplicationLag expr: (instance:mysql_slave_lag_seconds > 30) and on(instance) (predict_linear(instance:mysql_slave_lag_seconds[5m], 60 * 2) > 0) for: 1m labels: severity: critical annotations: description: "Replication on {{$labels.instance}} has fallen behind and is not recovering." summary: MySQL slave replication is lagging. - alert: MySQLHeartbeatLag expr: (instance:mysql_heartbeat_lag_seconds > 30) and on(instance) (predict_linear(instance:mysql_heartbeat_lag_seconds[5m], 60 * 2) > 0) for: 1m labels: severity: critical annotations: description: "The heartbeat is lagging on {{$labels.instance}} and is not recovering." summary: MySQL heartbeat is lagging. - alert: MySQLInnoDBLogWaits expr: rate(mysql_global_status_innodb_log_waits[15m]) > 10 labels: severity: warning annotations: description: The innodb logs are waiting for disk at a rate of {{$value}} / second summary: MySQL innodb log writes stalling. mysqld_exporter-0.15.0/mysqld-mixin/alerts/general.yaml000066400000000000000000000004001444546573200232740ustar00rootroot00000000000000groups: - name: MySQLdAlerts rules: - alert: MySQLDown expr: mysql_up != 1 for: 5m labels: severity: critical annotations: description: 'MySQL {{$labels.job}} on {{$labels.instance}} is not up.' summary: MySQL not up. 
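Rendered rule files like the two above can be checked offline. Below is a sketch of a `promtool test rules` unit test for the MySQLDown alert, assuming the mixin has been built into `alerts.yaml` as the Makefile does; the test file name and the `job`/`instance` label values are illustrative.

```yaml
# mysqld_alerts_test.yaml -- run with: promtool test rules mysqld_alerts_test.yaml
rule_files:
  - alerts.yaml
evaluation_interval: 1m
tests:
  - interval: 1m
    input_series:
      # mysql_up stays at 0; MySQLDown requires "for: 5m" before firing.
      - series: 'mysql_up{job="mysql", instance="db1:9104"}'
        values: '0x10'
    alert_rule_test:
      - eval_time: 6m
        alertname: MySQLDown
        exp_alerts:
          - exp_labels:
              severity: critical
              job: mysql
              instance: db1:9104
            exp_annotations:
              description: 'MySQL mysql on db1:9104 is not up.'
              summary: MySQL not up.
```

At the 6m evaluation point the series has been 0 for longer than the 5m `for` clause, so the test expects the alert to be firing with the rendered annotations shown.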
mysqld_exporter-0.15.0/mysqld-mixin/dashboards/000077500000000000000000000000001444546573200216215ustar00rootroot00000000000000mysqld_exporter-0.15.0/mysqld-mixin/dashboards/mysql-overview.json000066400000000000000000003101361444546573200255310ustar00rootroot00000000000000{ "annotations": { "list": [ { "builtIn": 1, "datasource": "-- Grafana --", "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", "type": "dashboard" } ] }, "description": "", "editable": true, "gnetId": 11323, "graphTooltip": 1, "id": 31, "iteration": 1603186191702, "links": [], "panels": [ { "collapsed": false, "datasource": "$datasource", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 }, "id": 382, "panels": [], "repeat": null, "title": "", "type": "row" }, { "cacheTimeout": null, "datasource": "$datasource", "description": "**Uptime**\n\nThe amount of time since the last restart of the MySQL server process.", "fieldConfig": { "defaults": { "custom": {}, "decimals": 1, "mappings": [], "nullValueMode": "connected", "thresholds": { "mode": "absolute", "steps": [ { "color": "rgba(245, 54, 54, 0.9)", "value": null }, { "color": "rgba(237, 129, 40, 0.89)", "value": 300 }, { "color": "rgba(50, 172, 45, 0.97)", "value": 3600 } ] }, "unit": "s" }, "overrides": [] }, "gridPos": { "h": 3, "w": 8, "x": 0, "y": 1 }, "id": 12, "interval": "1m", "links": [], "maxDataPoints": 100, "options": { "colorMode": "value", "fieldOptions": { "calcs": [ "lastNotNull" ] }, "graphMode": "none", "justifyMode": "auto", "orientation": "horizontal", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false } }, "pluginVersion": "7.0.4", "targets": [ { "calculatedInterval": "10m", "datasourceErrors": {}, "errors": {}, "expr": "mysql_global_status_uptime{job=~\"$job\", instance=~\"$instance\"}", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "{{instance}}", "metric": "", "refId": "A", "step": 300 } ], "title": "Uptime", "type": "stat" }, { "cacheTimeout": null, "datasource": "$datasource", "description": "**Current QPS**\n\nBased on the queries reported by MySQL's ``SHOW STATUS`` command, it is the number of statements executed by the server within the last second. This variable includes statements executed within stored programs, unlike the Questions variable. 
It does not count \n``COM_PING`` or ``COM_STATISTICS`` commands.", "fieldConfig": { "defaults": { "custom": {}, "decimals": 2, "mappings": [], "nullValueMode": "connected", "thresholds": { "mode": "absolute", "steps": [ { "color": "rgba(245, 54, 54, 0.9)", "value": null }, { "color": "rgba(237, 129, 40, 0.89)", "value": 35 }, { "color": "rgba(50, 172, 45, 0.97)", "value": 75 } ] }, "unit": "short" }, "overrides": [] }, "gridPos": { "h": 3, "w": 8, "x": 8, "y": 1 }, "id": 13, "interval": "1m", "links": [ { "targetBlank": true, "title": "MySQL Server Status Variables", "url": "https://dev.mysql.com/doc/refman/5.7/en/server-status-variables.html#statvar_Queries" } ], "maxDataPoints": 100, "options": { "colorMode": "value", "fieldOptions": { "calcs": [ "lastNotNull" ] }, "graphMode": "area", "justifyMode": "auto", "orientation": "horizontal", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false } }, "pluginVersion": "7.0.4", "targets": [ { "calculatedInterval": "10m", "datasourceErrors": {}, "errors": {}, "expr": "rate(mysql_global_status_queries{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "{{instance}}", "metric": "", "refId": "A", "step": 20 } ], "title": "Current QPS", "type": "stat" }, { "cacheTimeout": null, "datasource": "$datasource", "description": "**InnoDB Buffer Pool Size**\n\nInnoDB maintains a storage area called the buffer pool for caching data and indexes in memory. Knowing how the InnoDB buffer pool works, and taking advantage of it to keep frequently accessed data in memory, is one of the most important aspects of MySQL tuning. The goal is to keep the working set in memory. In most cases, this should be between 60%-90% of available memory on a dedicated database host, but depends on many factors.", "fieldConfig": { "defaults": { "custom": {}, "decimals": 0, "mappings": [], "nullValueMode": "connected", "thresholds": { "mode": "absolute", "steps": [ { "color": "rgba(50, 172, 45, 0.97)", "value": null }, { "color": "rgba(237, 129, 40, 0.89)", "value": 90 }, { "color": "rgba(245, 54, 54, 0.9)", "value": 95 } ] }, "unit": "bytes" }, "overrides": [] }, "gridPos": { "h": 3, "w": 8, "x": 16, "y": 1 }, "id": 51, "interval": "1m", "links": [ { "targetBlank": true, "title": "Tuning the InnoDB Buffer Pool Size", "url": "https://www.percona.com/blog/2015/06/02/80-ram-tune-innodb_buffer_pool_size/" } ], "maxDataPoints": 100, "options": { "colorMode": "value", "fieldOptions": { "calcs": [ "lastNotNull" ] }, "graphMode": "none", "justifyMode": "auto", "orientation": "horizontal", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false } }, "pluginVersion": "7.0.4", "targets": [ { "calculatedInterval": "10m", "datasourceErrors": {}, "errors": {}, "expr": "mysql_global_variables_innodb_buffer_pool_size{job=~\"$job\", instance=~\"$instance\"}", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "{{instance}}", "metric": "", "refId": "A", "step": 300 } ], "title": "InnoDB Buffer Pool", "type": "stat" }, { "collapsed": false, "datasource": "$datasource", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 4 }, "id": 383, "panels": [], "repeat": null, "title": "Connections", "type": "row" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 0, "description": "**Max Connections** \n\nMax Connections is the maximum permitted number of simultaneous client connections. By default, this is 151. 
Increasing this value increases the number of file descriptors that mysqld requires. If the required number of descriptors are not available, the server reduces the value of Max Connections.\n\nmysqld actually permits Max Connections + 1 clients to connect. The extra connection is reserved for use by accounts that have the SUPER privilege, such as root.\n\nMax Used Connections is the maximum number of connections that have been in use simultaneously since the server started.\n\nConnections is the number of connection attempts (successful or not) to the MySQL server.", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 0, "y": 5 }, "height": "250px", "hiddenSeries": false, "id": 92, "legend": { "alignAsTable": true, "avg": true, "current": false, "max": true, "min": true, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [ { "targetBlank": true, "title": "MySQL Server System Variables", "url": "https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_connections" } ], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [ { "alias": "Max Connections", "fill": 0 } ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(max_over_time(mysql_global_status_threads_connected{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Connections", "metric": "", "refId": "A", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(mysql_global_status_max_used_connections{job=~\"$job\", instance=~\"$instance\"})", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Max Used Connections", "metric": "", "refId": "C", "step": 20, "target": "" }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(mysql_global_variables_max_connections{job=~\"$job\", instance=~\"$instance\"})", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Max Connections", "metric": "", "refId": "B", "step": 20, "target": "" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Connections", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "cumulative" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": "", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "label": "", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "description": "**MySQL Active Threads**\n\nThreads Connected is the number of open connections, while Threads Running is the number of threads not sleeping.", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 12, "y": 5 }, "hiddenSeries": false, "id": 10, "legend": { "alignAsTable": true, 
"avg": true, "current": false, "max": true, "min": true, "rightSide": false, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [ { "alias": "Peak Threads Running", "color": "#E24D42", "lines": false, "pointradius": 1, "points": true }, { "alias": "Peak Threads Connected", "color": "#1F78C1" }, { "alias": "Avg Threads Running", "color": "#EAB839" } ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(max_over_time(mysql_global_status_threads_connected{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "hide": false, "interval": "1m", "intervalFactor": 1, "legendFormat": "Peak Threads Connected", "metric": "", "refId": "A", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(max_over_time(mysql_global_status_threads_running{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Peak Threads Running", "metric": "", "refId": "B", "step": 20 }, { "expr": "sum(avg_over_time(mysql_global_status_threads_running{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Avg Threads Running", "refId": "C", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Client Thread Activity", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [ "total" ] }, "yaxes": [ { "format": "short", "label": "Threads", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "label": "", "logBase": 1, "max": null, "min": 0, "show": false } ], "yaxis": { "align": false, "alignLevel": null } }, { "collapsed": false, "datasource": "$datasource", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 12 }, "id": 384, "panels": [], "repeat": null, "title": "Table Locks", "type": "row" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "description": "**MySQL Questions**\n\nThe number of statements executed by the server. This includes only statements sent to the server by clients and not statements executed within stored programs, unlike the Queries used in the QPS calculation. 
\n\nThis variable does not count the following commands:\n* ``COM_PING``\n* ``COM_STATISTICS``\n* ``COM_STMT_PREPARE``\n* ``COM_STMT_CLOSE``\n* ``COM_STMT_RESET``", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 0, "y": 13 }, "hiddenSeries": false, "id": 53, "legend": { "alignAsTable": true, "avg": true, "current": false, "max": true, "min": true, "rightSide": false, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [ { "targetBlank": true, "title": "MySQL Queries and Questions", "url": "https://www.percona.com/blog/2014/05/29/how-mysql-queries-and-questions-are-measured/" } ], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "rate(mysql_global_status_questions{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "{{instance}}", "metric": "", "refId": "A", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Questions", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "description": "**MySQL Thread Cache**\n\nThe thread_cache_size variable sets how many threads the server should cache to reuse. When a client disconnects, the client's threads are put in the cache if the cache is not full. It is autosized in MySQL 5.6.8 and above (capped to 100). 
Requests for threads are satisfied by reusing threads taken from the cache if possible, and only when the cache is empty is a new thread created.\n\n* *Threads_created*: The number of threads created to handle connections.\n* *Threads_cached*: The number of threads in the thread cache.", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 12, "y": 13 }, "hiddenSeries": false, "id": 11, "legend": { "alignAsTable": true, "avg": true, "current": false, "max": true, "min": true, "rightSide": false, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [ { "title": "Tuning information", "url": "https://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_thread_cache_size" } ], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [ { "alias": "Threads Created", "fill": 0 } ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(mysql_global_variables_thread_cache_size{job=~\"$job\", instance=~\"$instance\"})", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Thread Cache Size", "metric": "", "refId": "B", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(mysql_global_status_threads_cached{job=~\"$job\", instance=~\"$instance\"})", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Threads Cached", "metric": "", "refId": "C", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_threads_created{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Threads Created", "metric": "", "refId": "A", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Thread Cache", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "collapsed": false, "datasource": "$datasource", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 20 }, "id": 385, "panels": [], "repeat": null, "title": "Temporary Objects", "type": "row" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 0, "y": 21 }, "hiddenSeries": false, "id": 22, "legend": { "alignAsTable": true, "avg": true, "current": false, "max": true, "min": true, "rightSide": false, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", 
"seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_created_tmp_tables{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "interval": "1m", "intervalFactor": 1, "legendFormat": "Created Tmp Tables", "metric": "", "refId": "A", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_created_tmp_disk_tables{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "interval": "1m", "intervalFactor": 1, "legendFormat": "Created Tmp Disk Tables", "metric": "", "refId": "B", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_created_tmp_files{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "interval": "1m", "intervalFactor": 1, "legendFormat": "Created Tmp Files", "metric": "", "refId": "C", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Temporary Objects", "description": "MySQL Temporary Objects", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "description": "**MySQL Select Types**\n\nAs with most relational databases, selecting based on indexes is more efficient than scanning an entire table's data. 
Here we see the counters for selects not done with indexes.\n\n* ***Select Scan*** is how many queries caused full table scans, in which all the data in the table had to be read and either discarded or returned.\n* ***Select Range*** is how many queries used a range scan, which means MySQL scanned all rows in a given range.\n* ***Select Full Join*** is the number of joins that are not joined on an index, this is usually a huge performance hit.", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 12, "y": 21 }, "height": "250px", "hiddenSeries": false, "id": 311, "legend": { "alignAsTable": true, "avg": true, "current": false, "hideZero": true, "max": true, "min": true, "rightSide": false, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_select_full_join{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Select Full Join", "metric": "", "refId": "A", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_select_full_range_join{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Select Full Range Join", "metric": "", "refId": "B", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_select_range{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Select Range", "metric": "", "refId": "C", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_select_range_check{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Select Range Check", "metric": "", "refId": "D", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_select_scan{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Select Scan", "metric": "", "refId": "E", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Select Types", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "collapsed": false, "datasource": "$datasource", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 28 }, "id": 386, "panels": [], "repeat": null, "title": "Sorts", "type": "row" }, { "aliasColors": {}, "bars": false, 
"dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "description": "**MySQL Sorts**\n\nDue to a query's structure, order, or other requirements, MySQL sorts the rows before returning them. For example, if a table is ordered 1 to 10 but you want the results reversed, MySQL then has to sort the rows to return 10 to 1.\n\nThis graph also shows when sorts had to scan a whole table or a given range of a table in order to return the results and which could not have been sorted via an index.", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 0, "y": 29 }, "hiddenSeries": false, "id": 30, "legend": { "alignAsTable": true, "avg": true, "current": false, "hideZero": true, "max": true, "min": true, "rightSide": false, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_sort_rows{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Sort Rows", "metric": "", "refId": "A", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_sort_range{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Sort Range", "metric": "", "refId": "B", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_sort_merge_passes{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Sort Merge Passes", "metric": "", "refId": "C", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_sort_scan{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Sort Scan", "metric": "", "refId": "D", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Sorts", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "description": "**MySQL Slow Queries**\n\nSlow queries are defined as queries being slower than the long_query_time setting. 
For example, if you have long_query_time set to 3, all queries that take longer than 3 seconds to complete will show on this graph.", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 12, "y": 29 }, "hiddenSeries": false, "id": 48, "legend": { "alignAsTable": true, "avg": true, "current": false, "max": true, "min": true, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_slow_queries{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Slow Queries", "metric": "", "refId": "A", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Slow Queries", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "cumulative" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": "", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "label": "", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "collapsed": false, "datasource": "$datasource", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 36 }, "id": 387, "panels": [], "repeat": null, "title": "Aborted", "type": "row" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "description": "**Aborted Connections**\n\nWhen a given host connects to MySQL and the connection is interrupted in the middle (for example due to bad credentials), MySQL keeps that info in a system table (since 5.6 this table is exposed in performance_schema).\n\nIf the amount of failed requests without a successful connection reaches the value of max_connect_errors, mysqld assumes that something is wrong and blocks the host from further connection.\n\nTo allow connections from that host again, you need to issue the ``FLUSH HOSTS`` statement.", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 0, "y": 37 }, "hiddenSeries": false, "id": 47, "legend": { "alignAsTable": true, "avg": true, "current": false, "max": true, "min": true, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_aborted_connects{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Aborted Connects (attempts)", "metric": "", "refId": "A", "step": 20 }, { 
"calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_aborted_clients{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Aborted Clients (timeout)", "metric": "", "refId": "B", "step": 20, "target": "" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Aborted Connections", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "cumulative" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": "", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "label": "", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "description": "**Table Locks**\n\nMySQL takes a number of different locks for varying reasons. In this graph we see how many Table level locks MySQL has requested from the storage engine. In the case of InnoDB, many times the locks could actually be row locks as it only takes table level locks in a few specific cases.\n\nIt is most useful to compare Locks Immediate and Locks Waited. If Locks waited is rising, it means you have lock contention. Otherwise, Locks Immediate rising and falling is normal activity.", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 12, "y": 37 }, "hiddenSeries": false, "id": 32, "legend": { "alignAsTable": true, "avg": true, "current": false, "max": true, "min": true, "rightSide": false, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_table_locks_immediate{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Table Locks Immediate", "metric": "", "refId": "A", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_table_locks_waited{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Table Locks Waited", "metric": "", "refId": "B", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Table Locks", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "collapsed": false, "datasource": "$datasource", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 44 }, "id": 388, "panels": [], "repeat": 
null, "title": "Network", "type": "row" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "description": "**MySQL Network Traffic**\n\nHere we can see how much network traffic is generated by MySQL. Outbound is network traffic sent from MySQL and Inbound is network traffic MySQL has received.", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 6, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 24, "x": 0, "y": 45 }, "hiddenSeries": false, "id": 9, "legend": { "alignAsTable": true, "avg": true, "current": false, "max": true, "min": true, "rightSide": false, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": true, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_bytes_received{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Inbound", "metric": "", "refId": "A", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "sum(rate(mysql_global_status_bytes_sent{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Outbound", "metric": "", "refId": "B", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Network Traffic", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "Bps", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "none", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "collapsed": false, "datasource": "$datasource", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 52 }, "id": 389, "panels": [], "repeat": null, "title": "Memory", "type": "row" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 0, "description": "***System Memory***: Total Memory for the system.\\\n***InnoDB Buffer Pool Data***: InnoDB maintains a storage area called the buffer pool for caching data and indexes in memory.\\\n***TokuDB Cache Size***: Similar in function to the InnoDB Buffer Pool, TokuDB will allocate 50% of the installed RAM for its own cache.\\\n***Key Buffer Size***: Index blocks for MYISAM tables are buffered and are shared by all threads. key_buffer_size is the size of the buffer used for index blocks.\\\n***Adaptive Hash Index Size***: When InnoDB notices that some index values are being accessed very frequently, it builds a hash index for them in memory on top of B-Tree indexes.\\\n ***Query Cache Size***: The query cache stores the text of a SELECT statement together with the corresponding result that was sent to the client. 
The query cache has huge scalability problems in that only one thread can do an operation in the query cache at the same time.\\\n***InnoDB Dictionary Size***: The data dictionary is InnoDB's internal catalog of tables. InnoDB stores the data dictionary on disk, and loads entries into memory while the server is running.\\\n***InnoDB Log Buffer Size***: The MySQL InnoDB log buffer allows transactions to run without having to write the log to disk before the transactions commit.", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 6, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 24, "x": 0, "y": 53 }, "hiddenSeries": false, "id": 50, "legend": { "alignAsTable": true, "avg": true, "current": false, "hideEmpty": true, "hideZero": true, "max": true, "min": true, "rightSide": true, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [ { "title": "Detailed descriptions about metrics", "url": "https://www.percona.com/doc/percona-monitoring-and-management/dashboard.mysql-overview.html#mysql-internal-memory-overview" } ], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [ { "alias": "System Memory", "fill": 0, "stack": false } ], "spaceLength": 10, "stack": true, "steppedLine": false, "targets": [ { "expr": "sum(mysql_global_status_innodb_page_size{job=~\"$job\", instance=~\"$instance\"} * on (instance) mysql_global_status_buffer_pool_pages{job=~\"$job\", instance=~\"$instance\", state=\"data\"})", "format": "time_series", "hide": false, "interval": "1m", "intervalFactor": 1, "legendFormat": "InnoDB Buffer Pool Data", "refId": "A", "step": 20 }, { "expr": "sum(mysql_global_variables_innodb_log_buffer_size{job=~\"$job\", instance=~\"$instance\"})", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "InnoDB Log Buffer Size", "refId": "D", "step": 20 }, { "expr": "sum(mysql_global_variables_innodb_additional_mem_pool_size{job=~\"$job\", instance=~\"$instance\"})", "format": "time_series", "interval": "1m", "intervalFactor": 2, "legendFormat": "InnoDB Additional Memory Pool Size", "refId": "H", "step": 40 }, { "expr": "sum(mysql_global_status_innodb_mem_dictionary{job=~\"$job\", instance=~\"$instance\"})", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "InnoDB Dictionary Size", "refId": "F", "step": 20 }, { "expr": "sum(mysql_global_variables_key_buffer_size{job=~\"$job\", instance=~\"$instance\"})", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Key Buffer Size", "refId": "B", "step": 20 }, { "expr": "sum(mysql_global_variables_query_cache_size{job=~\"$job\", instance=~\"$instance\"})", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Query Cache Size", "refId": "C", "step": 20 }, { "expr": "sum(mysql_global_status_innodb_mem_adaptive_hash{job=~\"$job\", instance=~\"$instance\"})", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Adaptive Hash Index Size", "refId": "E", "step": 20 }, { "expr": "sum(mysql_global_variables_tokudb_cache_size{job=~\"$job\", instance=~\"$instance\"})", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "TokuDB Cache Size", "refId": "I", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title":
"MySQL Internal Memory Overview", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "bytes", "label": "", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "collapsed": false, "datasource": "$datasource", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 60 }, "id": 390, "panels": [], "repeat": null, "title": "Command, Handlers, Processes", "type": "row" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "description": "**Top Command Counters**\n\nThe Com_{{xxx}} statement counter variables indicate the number of times each xxx statement has been executed. There is one status variable for each type of statement. For example, Com_delete and Com_update count [``DELETE``](https://dev.mysql.com/doc/refman/5.7/en/delete.html) and [``UPDATE``](https://dev.mysql.com/doc/refman/5.7/en/update.html) statements, respectively. Com_delete_multi and Com_update_multi are similar but apply to [``DELETE``](https://dev.mysql.com/doc/refman/5.7/en/delete.html) and [``UPDATE``](https://dev.mysql.com/doc/refman/5.7/en/update.html) statements that use multiple-table syntax.", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 24, "x": 0, "y": 61 }, "hiddenSeries": false, "id": 14, "legend": { "alignAsTable": true, "avg": true, "current": false, "hideEmpty": false, "hideZero": false, "max": true, "min": true, "rightSide": true, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [ { "title": "Server Status Variables (Com_xxx)", "url": "https://dev.mysql.com/doc/refman/5.7/en/server-status-variables.html#statvar_Com_xxx" } ], "nullPointMode": "null as zero", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "topk(5, rate(mysql_global_status_commands_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])>0)", "format": "time_series", "hide": false, "interval": "1m", "intervalFactor": 1, "legendFormat": "Com_{{ command }}", "metric": "", "refId": "B", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Top Command Counters", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "description": "**MySQL Handlers**\n\nHandler statistics are internal statistics on how MySQL is selecting, updating, inserting, and modifying rows, tables, and indexes.\n\nThis is in fact the layer between the Storage Engine 
and MySQL.\n\n* `read_rnd_next` is incremented when the server performs a full table scan and this is a counter you don't really want to see with a high value.\n* `read_key` is incremented when a read is done with an index.\n* `read_next` is incremented when the storage engine is asked to 'read the next index entry'. A high value means a lot of index scans are being done.", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 24, "x": 0, "y": 68 }, "hiddenSeries": false, "id": 8, "legend": { "alignAsTable": true, "avg": true, "current": false, "hideZero": true, "max": true, "min": true, "rightSide": true, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "rate(mysql_global_status_handlers_total{job=~\"$job\",instance=~\"$instance\", handler!~\"commit|rollback|savepoint.*|prepare\"}[$__rate_interval])", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "{{ handler }}", "metric": "", "refId": "J", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Handlers", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 24, "x": 0, "y": 75 }, "hiddenSeries": false, "id": 28, "legend": { "alignAsTable": true, "avg": true, "current": false, "hideZero": true, "max": true, "min": true, "rightSide": true, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "rate(mysql_global_status_handlers_total{job=~\"$job\",instance=~\"$instance\", handler=~\"commit|rollback|savepoint.*|prepare\"}[$__rate_interval])", "interval": "1m", "intervalFactor": 1, "legendFormat": "{{ handler }}", "metric": "", "refId": "A", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Transaction Handlers", "description": "MySQL Transaction Handlers", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, 
"max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 0, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 24, "x": 0, "y": 82 }, "hiddenSeries": false, "id": 40, "legend": { "alignAsTable": true, "avg": true, "current": false, "hideZero": true, "max": true, "min": false, "rightSide": true, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "null as zero", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "mysql_info_schema_processlist_threads{job=~\"$job\", instance=~\"$instance\"}", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "{{ state }}", "metric": "", "refId": "A", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Process States", "description": "Process States", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": true, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 6, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 24, "x": 0, "y": 89 }, "hiddenSeries": false, "id": 49, "legend": { "alignAsTable": true, "avg": true, "current": false, "hideZero": true, "max": true, "min": false, "rightSide": true, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": false, "linewidth": 2, "links": [], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": true, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "topk(5, avg_over_time(mysql_info_schema_processlist_threads{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "interval": "1m", "intervalFactor": 1, "legendFormat": "{{ state }}", "metric": "", "refId": "A", "step": 3600 } ], "thresholds": [], "timeFrom": "24h", "timeRegions": [], "timeShift": null, "title": "Top Process States Hourly", "description": "Top Process States Hourly", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, 
"alignLevel": null } }, { "collapsed": false, "datasource": "$datasource", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 96 }, "id": 391, "panels": [], "repeat": null, "title": "Query Cache", "type": "row" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "description": "**MySQL Query Cache Memory**\n\nThe query cache has huge scalability problems in that only one thread can do an operation in the query cache at the same time. This serialization is true not only for SELECTs, but also for INSERT/UPDATE/DELETE.\n\nThis also means that the larger the `query_cache_size` is set to, the slower those operations become. In concurrent environments, the MySQL Query Cache quickly becomes a contention point, decreasing performance. MariaDB and AWS Aurora have done work to try and eliminate the query cache contention in their flavors of MySQL, while MySQL 8.0 has eliminated the query cache feature.\n\nThe recommended settings for most environments is to set:\n ``query_cache_type=0``\n ``query_cache_size=0``\n\nNote that while you can dynamically change these values, to completely remove the contention point you have to restart the database.", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 0, "y": 97 }, "hiddenSeries": false, "id": 46, "legend": { "alignAsTable": true, "avg": true, "current": false, "max": true, "min": true, "rightSide": false, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "mysql_global_status_qcache_free_memory{job=~\"$job\", instance=~\"$instance\"}", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Free Memory", "metric": "", "refId": "F", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "mysql_global_variables_query_cache_size{job=~\"$job\", instance=~\"$instance\"}", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Query Cache Size", "metric": "", "refId": "E", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Query Cache Memory", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "bytes", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "description": "**MySQL Query Cache Activity**\n\nThe query cache has huge scalability problems in that only one thread can do an operation in the query cache at the same time. This serialization is true not only for SELECTs, but also for INSERT/UPDATE/DELETE.\n\nThis also means that the larger the `query_cache_size` is set to, the slower those operations become. 
In concurrent environments, the MySQL Query Cache quickly becomes a contention point, decreasing performance. MariaDB and AWS Aurora have done work to try and eliminate the query cache contention in their flavors of MySQL, while MySQL 8.0 has eliminated the query cache feature.\n\nThe recommended settings for most environments is to set:\n``query_cache_type=0``\n``query_cache_size=0``\n\nNote that while you can dynamically change these values, to completely remove the contention point you have to restart the database.", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 12, "y": 97 }, "height": "", "hiddenSeries": false, "id": 45, "legend": { "alignAsTable": true, "avg": true, "current": false, "max": true, "min": true, "rightSide": false, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "rate(mysql_global_status_qcache_hits{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Hits", "metric": "", "refId": "B", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "rate(mysql_global_status_qcache_inserts{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Inserts", "metric": "", "refId": "C", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "rate(mysql_global_status_qcache_not_cached{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Not Cached", "metric": "", "refId": "D", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "rate(mysql_global_status_qcache_lowmem_prunes{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Prunes", "metric": "", "refId": "F", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "mysql_global_status_qcache_queries_in_cache{job=~\"$job\", instance=~\"$instance\"}", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Queries in Cache", "metric": "", "refId": "E", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Query Cache Activity", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "collapsed": false, "datasource": "$datasource", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 104 }, "id": 392, "panels": [], "repeat": null, "title": "Files and Tables", "type": "row" }, { "aliasColors": {}, "bars": false, 
"dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 0, "y": 105 }, "hiddenSeries": false, "id": 43, "legend": { "alignAsTable": true, "avg": true, "current": false, "max": true, "min": true, "rightSide": false, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "rate(mysql_global_status_opened_files{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])", "interval": "1m", "intervalFactor": 1, "legendFormat": "Openings", "metric": "", "refId": "A", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL File Openings", "description": "MySQL File Openings", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 12, "y": 105 }, "hiddenSeries": false, "id": 41, "legend": { "alignAsTable": true, "avg": true, "current": false, "max": true, "min": true, "rightSide": false, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "mysql_global_status_open_files{job=~\"$job\", instance=~\"$instance\"}", "interval": "1m", "intervalFactor": 1, "legendFormat": "Open Files", "metric": "", "refId": "A", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "mysql_global_variables_open_files_limit{job=~\"$job\", instance=~\"$instance\"}", "interval": "1m", "intervalFactor": 1, "legendFormat": "Open Files Limit", "metric": "", "refId": "D", "step": 20 }, { "expr": "mysql_global_status_innodb_num_open_files{job=~\"$job\", instance=~\"$instance\"}", "interval": "1m", "intervalFactor": 1, "legendFormat": "InnoDB Open Files", "refId": "B", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Open Files", "description": "MySQL Open Files", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", 
"logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "collapsed": false, "datasource": "$datasource", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 112 }, "id": 393, "panels": [], "repeat": null, "title": "Table Openings", "type": "row" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "description": "**MySQL Table Open Cache Status**\n\nThe recommendation is to set the `table_open_cache_instances` to a loose correlation to virtual CPUs, keeping in mind that more instances means the cache is split more times. If you have a cache set to 500 but it has 10 instances, each cache will only have 50 cached.\n\nThe `table_definition_cache` and `table_open_cache` can be left as default as they are auto-sized MySQL 5.6 and above (ie: do not set them to any value).", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 0, "y": 113 }, "hiddenSeries": false, "id": 44, "legend": { "alignAsTable": true, "avg": true, "current": false, "max": true, "min": true, "rightSide": false, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [ { "title": "Server Status Variables (table_open_cache)", "url": "http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_table_open_cache" } ], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [ { "alias": "Table Open Cache Hit Ratio", "yaxis": 2 } ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "rate(mysql_global_status_opened_tables{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Openings", "metric": "", "refId": "A", "step": 20 }, { "expr": "rate(mysql_global_status_table_open_cache_hits{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Hits", "refId": "B", "step": 20 }, { "expr": "rate(mysql_global_status_table_open_cache_misses{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Misses", "refId": "C", "step": 20 }, { "expr": "rate(mysql_global_status_table_open_cache_overflows{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Misses due to Overflows", "refId": "D", "step": 20 }, { "expr": "rate(mysql_global_status_table_open_cache_hits{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])/(rate(mysql_global_status_table_open_cache_hits{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])+rate(mysql_global_status_table_open_cache_misses{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Table Open Cache Hit Ratio", "refId": "E", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Table Open Cache Status", "tooltip": { "msResolution": false, 
"shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "percentunit", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "description": "**MySQL Open Tables**\n\nThe recommendation is to set the `table_open_cache_instances` to a loose correlation to virtual CPUs, keeping in mind that more instances means the cache is split more times. If you have a cache set to 500 but it has 10 instances, each cache will only have 50 cached.\n\nThe `table_definition_cache` and `table_open_cache` can be left as default as they are auto-sized MySQL 5.6 and above (ie: do not set them to any value).", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 12, "x": 12, "y": 113 }, "hiddenSeries": false, "id": 42, "legend": { "alignAsTable": true, "avg": true, "current": false, "max": true, "min": true, "rightSide": false, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [ { "title": "Server Status Variables (table_open_cache)", "url": "http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_table_open_cache" } ], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "mysql_global_status_open_tables{job=~\"$job\", instance=~\"$instance\"}", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Open Tables", "metric": "", "refId": "B", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "mysql_global_variables_table_open_cache{job=~\"$job\", instance=~\"$instance\"}", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Table Open Cache", "metric": "", "refId": "C", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Open Tables", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "collapsed": false, "datasource": "$datasource", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 120 }, "id": 394, "panels": [], "repeat": null, "title": "MySQL Table Definition Cache", "type": "row" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", "decimals": 2, "description": "**MySQL Table Definition Cache**\n\nThe recommendation is to set the `table_open_cache_instances` to a loose correlation to virtual CPUs, keeping in mind that more instances means the cache is split more times. 
If you have a cache set to 500 but it has 10 instances, each cache will only have 50 cached.\n\nThe `table_definition_cache` and `table_open_cache` can be left as default as they are auto-sized MySQL 5.6 and above (ie: do not set them to any value).", "editable": true, "error": false, "fieldConfig": { "defaults": { "unit": "short", "custom": {} }, "overrides": [] }, "fill": 2, "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, "w": 24, "x": 0, "y": 121 }, "hiddenSeries": false, "id": 54, "legend": { "alignAsTable": true, "avg": true, "current": false, "max": true, "min": true, "rightSide": false, "show": true, "sort": "avg", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 2, "links": [ { "title": "Server Status Variables (table_open_cache)", "url": "http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_table_open_cache" } ], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [ { "alias": "Opened Table Definitions", "yaxis": 2 } ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "mysql_global_status_open_table_definitions{job=~\"$job\", instance=~\"$instance\"}", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Open Table Definitions", "metric": "", "refId": "B", "step": 20 }, { "calculatedInterval": "2m", "datasourceErrors": {}, "errors": {}, "expr": "mysql_global_variables_table_definition_cache{job=~\"$job\", instance=~\"$instance\"}", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Table Definitions Cache Size", "metric": "", "refId": "C", "step": 20 }, { "expr": "rate(mysql_global_status_opened_table_definitions{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])", "format": "time_series", "interval": "1m", "intervalFactor": 1, "legendFormat": "Opened Table Definitions", "refId": "A", "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "MySQL Table Definition Cache", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true }, { "format": "short", "logBase": 1, "max": null, "min": 0, "show": true } ], "yaxis": { "align": false, "alignLevel": null } } ], "refresh": "10s", "schemaVersion": 25, "style": "dark", "tags": [], "templating": { "list": [ { "current": { "selected": false, "text": "prometheus", "value": "prometheus" }, "hide": 0, "includeAll": false, "label": "Data Source", "multi": false, "name": "datasource", "options": [], "query": "prometheus", "refresh": 1, "regex": "", "skipUrlSync": false, "type": "datasource" }, { "allValue": ".+", "current": { "selected": true, "text": "hosted-grafana/cloudsql-proxy-mysql-exporter", "value": [ "hosted-grafana/cloudsql-proxy-mysql-exporter" ] }, "datasource": "$datasource", "definition": "label_values(mysql_up, job)", "hide": 0, "includeAll": true, "label": "job", "multi": true, "name": "job", "options": [], "query": "label_values(mysql_up, job)", "refresh": 1, "regex": "", "skipUrlSync": false, "sort": 0, "tagValuesQuery": "", "tags": [], "tagsQuery": "", "type": "query", "useTags": false }, { "allValue": ".+", "current": { "selected": true, "tags": [], 
"text": "All", "value": [ "$__all" ] }, "datasource": "$datasource", "definition": "label_values(mysql_up, instance)", "hide": 0, "includeAll": true, "label": "instance", "multi": true, "name": "instance", "options": [], "query": "label_values(mysql_up, instance)", "refresh": 1, "regex": "", "skipUrlSync": false, "sort": 0, "tagValuesQuery": "", "tags": [], "tagsQuery": "", "type": "query", "useTags": false } ] }, "time": { "from": "now-1h", "to": "now" }, "timepicker": { "collapse": false, "enable": true, "hidden": false, "notice": false, "now": true, "refresh_intervals": [ "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "status": "Stable", "time_options": [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ], "type": "timepicker" }, "timezone": "", "title": "MySQL", "uid": "549c2bf8936f7767ea6ac47c47b00f2a", "version": 1 } mysqld_exporter-0.15.0/mysqld-mixin/mixin.libsonnet000066400000000000000000000010221444546573200225450ustar00rootroot00000000000000{ grafanaDashboards: { 'mysql-overview.json': (import 'dashboards/mysql-overview.json'), }, // Helper function to ensure that we don't override other rules, by forcing // the patching of the groups list, and not the overall rules object. local importRules(rules) = { groups+: std.native('parseYaml')(rules)[0].groups, }, prometheusRules+: importRules(importstr 'rules/rules.yaml'), prometheusAlerts+: importRules(importstr 'alerts/general.yaml') + importRules(importstr 'alerts/galera.yaml'), } mysqld_exporter-0.15.0/mysqld-mixin/rules/000077500000000000000000000000001444546573200206415ustar00rootroot00000000000000mysqld_exporter-0.15.0/mysqld-mixin/rules/rules.yaml000066400000000000000000000011511444546573200226550ustar00rootroot00000000000000groups: - name: mysqld_rules rules: # Record slave lag seconds for pre-computed timeseries that takes # `mysql_slave_status_sql_delay` into account - record: instance:mysql_slave_lag_seconds expr: mysql_slave_status_seconds_behind_master - mysql_slave_status_sql_delay # Record slave lag via heartbeat method - record: instance:mysql_heartbeat_lag_seconds expr: mysql_heartbeat_now_timestamp_seconds - mysql_heartbeat_stored_timestamp_seconds - record: job:mysql_transactions:rate5m expr: sum without (command) (rate(mysql_global_status_commands_total{command=~"(commit|rollback)"}[5m])) mysqld_exporter-0.15.0/mysqld_exporter.go000066400000000000000000000231321444546573200206450ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package main

import (
	"context"
	"net/http"
	"os"
	"strconv"
	"time"

	"github.com/alecthomas/kingpin/v2"
	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/prometheus/common/promlog"
	"github.com/prometheus/common/promlog/flag"
	"github.com/prometheus/common/version"
	"github.com/prometheus/exporter-toolkit/web"
	webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag"

	"github.com/prometheus/mysqld_exporter/collector"
	"github.com/prometheus/mysqld_exporter/config"
)

var (
	metricsPath = kingpin.Flag(
		"web.telemetry-path",
		"Path under which to expose metrics.",
	).Default("/metrics").String()
	timeoutOffset = kingpin.Flag(
		"timeout-offset",
		"Offset to subtract from timeout in seconds.",
	).Default("0.25").Float64()
	configMycnf = kingpin.Flag(
		"config.my-cnf",
		"Path to .my.cnf file to read MySQL credentials from.",
	).Default(".my.cnf").String()
	mysqldAddress = kingpin.Flag(
		"mysqld.address",
		"Address to use for connecting to MySQL",
	).Default("localhost:3306").String()
	mysqldUser = kingpin.Flag(
		"mysqld.username",
		"Username to use for connecting to MySQL",
	).String()
	tlsInsecureSkipVerify = kingpin.Flag(
		"tls.insecure-skip-verify",
		"Ignore certificate and server verification when using a tls connection.",
	).Bool()
	toolkitFlags = webflag.AddFlags(kingpin.CommandLine, ":9104")
	c            = config.MySqlConfigHandler{
		Config: &config.Config{},
	}
)

// scrapers lists all possible collection methods and if they should be enabled by default.
var scrapers = map[collector.Scraper]bool{
	collector.ScrapeGlobalStatus{}:                        true,
	collector.ScrapeGlobalVariables{}:                     true,
	collector.ScrapeSlaveStatus{}:                         true,
	collector.ScrapeProcesslist{}:                         false,
	collector.ScrapeUser{}:                                false,
	collector.ScrapeTableSchema{}:                         false,
	collector.ScrapeInfoSchemaInnodbTablespaces{}:         false,
	collector.ScrapeInnodbMetrics{}:                       false,
	collector.ScrapeAutoIncrementColumns{}:                false,
	collector.ScrapeBinlogSize{}:                          false,
	collector.ScrapePerfTableIOWaits{}:                    false,
	collector.ScrapePerfIndexIOWaits{}:                    false,
	collector.ScrapePerfTableLockWaits{}:                  false,
	collector.ScrapePerfEventsStatements{}:                false,
	collector.ScrapePerfEventsStatementsSum{}:             false,
	collector.ScrapePerfEventsWaits{}:                     false,
	collector.ScrapePerfFileEvents{}:                      false,
	collector.ScrapePerfFileInstances{}:                   false,
	collector.ScrapePerfMemoryEvents{}:                    false,
	collector.ScrapePerfReplicationGroupMembers{}:         false,
	collector.ScrapePerfReplicationGroupMemberStats{}:     false,
	collector.ScrapePerfReplicationApplierStatsByWorker{}: false,
	collector.ScrapeSysUserSummary{}:                      false,
	collector.ScrapeUserStat{}:                            false,
	collector.ScrapeClientStat{}:                          false,
	collector.ScrapeTableStat{}:                           false,
	collector.ScrapeSchemaStat{}:                          false,
	collector.ScrapeInnodbCmp{}:                           true,
	collector.ScrapeInnodbCmpMem{}:                        true,
	collector.ScrapeQueryResponseTime{}:                   true,
	collector.ScrapeEngineTokudbStatus{}:                  false,
	collector.ScrapeEngineInnodbStatus{}:                  false,
	collector.ScrapeHeartbeat{}:                           false,
	collector.ScrapeSlaveHosts{}:                          false,
	collector.ScrapeReplicaHost{}:                         false,
}

func filterScrapers(scrapers []collector.Scraper, collectParams []string) []collector.Scraper {
	var filteredScrapers []collector.Scraper

	// Check if we have some "collect[]" query parameters.
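	// For example, a scrape of /metrics?collect[]=global_status&collect[]=perf_schema.tableiowaits
	// restricts collection to just those scrapers. If no collect[] parameters are
	// present, or none of them name a known scraper, the full set passed in is used.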
	if len(collectParams) > 0 {
		filters := make(map[string]bool)
		for _, param := range collectParams {
			filters[param] = true
		}

		for _, scraper := range scrapers {
			if filters[scraper.Name()] {
				filteredScrapers = append(filteredScrapers, scraper)
			}
		}
	}

	if len(filteredScrapers) == 0 {
		return scrapers
	}

	return filteredScrapers
}

func init() {
	prometheus.MustRegister(version.NewCollector("mysqld_exporter"))
}

func newHandler(scrapers []collector.Scraper, logger log.Logger) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var dsn string
		var err error

		target := ""
		q := r.URL.Query()
		if q.Has("target") {
			target = q.Get("target")
		}

		cfg := c.GetConfig()
		cfgsection, ok := cfg.Sections["client"]
		if !ok {
			level.Error(logger).Log("msg", "Failed to parse section [client] from config file", "err", err)
		}
		if dsn, err = cfgsection.FormDSN(target); err != nil {
			level.Error(logger).Log("msg", "Failed to form dsn from section [client]", "err", err)
		}

		collect := q["collect[]"]

		// Use request context for cancellation when connection gets closed.
		ctx := r.Context()
		// If a timeout is configured via the Prometheus header, add it to the context.
		if v := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds"); v != "" {
			timeoutSeconds, err := strconv.ParseFloat(v, 64)
			if err != nil {
				level.Error(logger).Log("msg", "Failed to parse timeout from Prometheus header", "err", err)
			} else {
				if *timeoutOffset >= timeoutSeconds {
					// Ignore timeout offset if it doesn't leave time to scrape.
					level.Error(logger).Log("msg", "Timeout offset should be lower than prometheus scrape timeout", "offset", *timeoutOffset, "prometheus_scrape_timeout", timeoutSeconds)
				} else {
					// Subtract timeout offset from timeout.
					timeoutSeconds -= *timeoutOffset
				}
				// Create new timeout context with request context as parent.
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, time.Duration(timeoutSeconds*float64(time.Second)))
				defer cancel()
				// Overwrite request with timeout context.
				r = r.WithContext(ctx)
			}
		}

		filteredScrapers := filterScrapers(scrapers, collect)

		registry := prometheus.NewRegistry()
		registry.MustRegister(collector.New(ctx, dsn, filteredScrapers, logger))

		gatherers := prometheus.Gatherers{
			prometheus.DefaultGatherer,
			registry,
		}
		// Delegate http serving to Prometheus client library, which will call collector.Collect.
		h := promhttp.HandlerFor(gatherers, promhttp.HandlerOpts{})
		h.ServeHTTP(w, r)
	}
}

func main() {
	// Generate ON/OFF flags for all scrapers.
	scraperFlags := map[collector.Scraper]*bool{}
	for scraper, enabledByDefault := range scrapers {
		defaultOn := "false"
		if enabledByDefault {
			defaultOn = "true"
		}

		f := kingpin.Flag(
			"collect."+scraper.Name(),
			scraper.Help(),
		).Default(defaultOn).Bool()

		scraperFlags[scraper] = f
	}

	// Parse flags.
	promlogConfig := &promlog.Config{}
	flag.AddFlags(kingpin.CommandLine, promlogConfig)
	kingpin.Version(version.Print("mysqld_exporter"))
	kingpin.HelpFlag.Short('h')
	kingpin.Parse()
	logger := promlog.New(promlogConfig)

	level.Info(logger).Log("msg", "Starting mysqld_exporter", "version", version.Info())
	level.Info(logger).Log("msg", "Build context", "build_context", version.BuildContext())

	var err error
	if err = c.ReloadConfig(*configMycnf, *mysqldAddress, *mysqldUser, *tlsInsecureSkipVerify, logger); err != nil {
		level.Info(logger).Log("msg", "Error parsing host config", "file", *configMycnf, "err", err)
		os.Exit(1)
	}

	// Register only scrapers enabled by flag.
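	// Each scraper's kingpin flag is a boolean, so the defaults above can be
	// overridden in either direction, e.g. --collect.info_schema.processlist to
	// opt in to an optional scraper, or --no-collect.global_status to opt out
	// of a default one.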
	enabledScrapers := []collector.Scraper{}
	for scraper, enabled := range scraperFlags {
		if *enabled {
			level.Info(logger).Log("msg", "Scraper enabled", "scraper", scraper.Name())
			enabledScrapers = append(enabledScrapers, scraper)
		}
	}
	handlerFunc := newHandler(enabledScrapers, logger)
	http.Handle(*metricsPath, promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, handlerFunc))
	if *metricsPath != "/" && *metricsPath != "" {
		landingConfig := web.LandingConfig{
			Name:        "MySQLd Exporter",
			Description: "Prometheus Exporter for MySQL servers",
			Version:     version.Info(),
			Links: []web.LandingLinks{
				{
					Address: *metricsPath,
					Text:    "Metrics",
				},
			},
		}
		landingPage, err := web.NewLandingPage(landingConfig)
		if err != nil {
			level.Error(logger).Log("err", err)
			os.Exit(1)
		}
		http.Handle("/", landingPage)
	}
	http.HandleFunc("/probe", handleProbe(enabledScrapers, logger))
	http.HandleFunc("/-/reload", func(w http.ResponseWriter, r *http.Request) {
		if err = c.ReloadConfig(*configMycnf, *mysqldAddress, *mysqldUser, *tlsInsecureSkipVerify, logger); err != nil {
			level.Warn(logger).Log("msg", "Error reloading host config", "file", *configMycnf, "error", err)
			return
		}
		_, _ = w.Write([]byte(`ok`))
	})

	srv := &http.Server{}
	if err := web.ListenAndServe(srv, toolkitFlags, logger); err != nil {
		level.Error(logger).Log("msg", "Error starting HTTP server", "err", err)
		os.Exit(1)
	}
}
mysqld_exporter-0.15.0/mysqld_exporter_test.go000066400000000000000000000156731444546573200217140ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
	"os/exec"
	"reflect"
	"runtime"
	"strings"
	"syscall"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"

	"github.com/prometheus/mysqld_exporter/collector"
)

// bin stores information about path of executable and attached port
type bin struct {
	path string
	port int
}

// TestBin builds, runs and tests binary.
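// It compiles a throwaway binary into a temporary directory, injecting
// gotest-* version strings via -ldflags "-X" overrides, then runs the
// HTTP-facing subtests in parallel, each against its own listener port
// (56001 and up).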
func TestBin(t *testing.T) {
	var err error
	binName := "mysqld_exporter"

	binDir, err := os.MkdirTemp("/tmp", binName+"-test-bindir-")
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		err := os.RemoveAll(binDir)
		if err != nil {
			t.Fatal(err)
		}
	}()

	importpath := "github.com/prometheus/common"
	path := binDir + "/" + binName
	xVariables := map[string]string{
		importpath + "/version.Version":  "gotest-version",
		importpath + "/version.Branch":   "gotest-branch",
		importpath + "/version.Revision": "gotest-revision",
	}
	var ldflags []string
	for x, value := range xVariables {
		ldflags = append(ldflags, fmt.Sprintf("-X %s=%s", x, value))
	}
	cmd := exec.Command(
		"go",
		"build",
		"-o", path,
		"-ldflags", strings.Join(ldflags, " "),
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	err = cmd.Run()
	if err != nil {
		t.Fatalf("Failed to build: %s", err)
	}

	tests := []func(*testing.T, bin){
		testLanding,
		testProbe,
	}

	portStart := 56000
	t.Run(binName, func(t *testing.T) {
		for _, f := range tests {
			f := f // capture range variable
			fName := runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
			portStart++
			data := bin{
				path: path,
				port: portStart,
			}
			t.Run(fName, func(t *testing.T) {
				t.Parallel()
				f(t, data)
			})
		}
	})
}

func testLanding(t *testing.T, data bin) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Run exporter.
	cmd := exec.CommandContext(
		ctx,
		data.path,
		"--web.listen-address", fmt.Sprintf(":%d", data.port),
		"--config.my-cnf=test_exporter.cnf",
	)
	if err := cmd.Start(); err != nil {
		t.Fatal(err)
	}
	defer cmd.Wait()
	defer cmd.Process.Kill()

	// Get the main page.
	urlToGet := fmt.Sprintf("http://127.0.0.1:%d", data.port)
	body, err := waitForBody(urlToGet)
	if err != nil {
		t.Fatal(err)
	}
	got := string(body)

	expected := ` MySQLd Exporter

MySQLd Exporter

Prometheus Exporter for MySQL servers

Version: (version=gotest-version, branch=gotest-branch, revision=gotest-revision)
`
	if diff := cmp.Diff(expected, got); diff != "" {
		t.Fatalf("expected != got \n%v\n", diff)
	}
}

func testProbe(t *testing.T, data bin) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Run exporter.
	cmd := exec.CommandContext(
		ctx,
		data.path,
		"--web.listen-address", fmt.Sprintf(":%d", data.port),
		"--config.my-cnf=test_exporter.cnf",
	)
	if err := cmd.Start(); err != nil {
		t.Fatal(err)
	}
	defer cmd.Wait()
	defer cmd.Process.Kill()

	// Get the main page.
	urlToGet := fmt.Sprintf("http://127.0.0.1:%d/probe", data.port)
	body, err := waitForBody(urlToGet)
	if err != nil {
		t.Fatal(err)
	}
	got := strings.TrimSpace(string(body))

	expected := `target is required`
	if got != expected {
		t.Fatalf("got '%s' but expected '%s'", got, expected)
	}
}

// waitForBody is a helper function which makes http calls until http server is up
// and then returns body of the successful call.
func waitForBody(urlToGet string) (body []byte, err error) {
	tries := 60

	// Get data, but we need to wait a bit for http server.
	for i := 0; i <= tries; i++ {
		// Try to get web page.
		body, err = getBody(urlToGet)
		if err == nil {
			return body, err
		}

		// If there is a syscall.ECONNREFUSED error (web server not available) then retry.
		if urlError, ok := err.(*url.Error); ok {
			if opError, ok := urlError.Err.(*net.OpError); ok {
				if osSyscallError, ok := opError.Err.(*os.SyscallError); ok {
					if osSyscallError.Err == syscall.ECONNREFUSED {
						time.Sleep(1 * time.Second)
						continue
					}
				}
			}
		}

		// There was an error, and it wasn't syscall.ECONNREFUSED.
		return nil, err
	}

	return nil, fmt.Errorf("failed to GET %s after %d tries: %s", urlToGet, tries, err)
}

// getBody is a helper function which retrieves http body from given address.
func getBody(urlToGet string) ([]byte, error) {
	resp, err := http.Get(urlToGet)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	return body, nil
}

func Test_filterScrapers(t *testing.T) {
	type args struct {
		scrapers      []collector.Scraper
		collectParams []string
	}
	tests := []struct {
		name string
		args args
		want []collector.Scraper
	}{
		{"args_appears_in_collector", args{
			[]collector.Scraper{collector.ScrapeGlobalStatus{}},
			[]string{collector.ScrapeGlobalStatus{}.Name()},
		}, []collector.Scraper{
			collector.ScrapeGlobalStatus{},
		}},
		{"args_absent_in_collector", args{
			[]collector.Scraper{collector.ScrapeGlobalStatus{}},
			[]string{collector.ScrapeGlobalVariables{}.Name()},
		}, []collector.Scraper{collector.ScrapeGlobalStatus{}}},
		{"respect_params", args{
			[]collector.Scraper{
				collector.ScrapeGlobalStatus{},
				collector.ScrapeGlobalVariables{},
			},
			[]string{collector.ScrapeGlobalStatus{}.Name()},
		}, []collector.Scraper{
			collector.ScrapeGlobalStatus{},
		},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := filterScrapers(tt.args.scrapers, tt.args.collectParams); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("filterScrapers() = %v, want %v", got, tt.want)
			}
		})
	}
}
mysqld_exporter-0.15.0/probe.go000066400000000000000000000042011444546573200165100ustar00rootroot00000000000000// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"fmt"
	"net/http"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/prometheus/mysqld_exporter/collector"
)

func handleProbe(scrapers []collector.Scraper, logger log.Logger) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		params := r.URL.Query()
		target := params.Get("target")
		if target == "" {
			http.Error(w, "target is required", http.StatusBadRequest)
			return
		}
		collectParams := r.URL.Query()["collect[]"]

		authModule := params.Get("auth_module")
		if authModule == "" {
			authModule = "client"
		}

		cfg := c.GetConfig()
		cfgsection, ok := cfg.Sections[authModule]
		if !ok {
			level.Error(logger).Log("msg", fmt.Sprintf("Could not find section [%s] from config file", authModule))
			http.Error(w, fmt.Sprintf("Could not find config section [%s]", authModule), http.StatusBadRequest)
			return
		}
		dsn, err := cfgsection.FormDSN(target)
		if err != nil {
			level.Error(logger).Log("msg", fmt.Sprintf("Failed to form dsn from section [%s]", authModule), "err", err)
			http.Error(w, fmt.Sprintf("Error forming dsn from config section [%s]", authModule), http.StatusBadRequest)
			return
		}

		filteredScrapers := filterScrapers(scrapers, collectParams)

		registry := prometheus.NewRegistry()
		registry.MustRegister(collector.New(ctx, dsn, filteredScrapers, logger))

		h := promhttp.HandlerFor(registry, promhttp.HandlerOpts{})
		h.ServeHTTP(w, r)
	}
}
mysqld_exporter-0.15.0/test_exporter.cnf000066400000000000000000000002111444546573200204560ustar00rootroot00000000000000[client]
host=localhost
port=3306
socket=/var/run/mysqld/mysqld.sock
user=foo
password=bar

[client.server1]
user = bar
password = bar123
mysqld_exporter-0.15.0/test_image.sh000077500000000000000000000012331444546573200175330ustar00rootroot00000000000000#!/bin/bash
set -exo pipefail

docker_image=$1
port=$2

container_id=''

wait_start() {
  for i in {1..10}; do
    if /usr/bin/curl -s -m 5 -f "http://localhost:${port}/metrics" > /dev/null; then
      docker_cleanup
      exit 0
    else
      sleep 1
    fi
  done

  exit 1
}

docker_start() {
  container_id=$(docker run -d --network mysql-test -p "${port}":"${port}" "${docker_image}" --config.my-cnf=test_exporter.cnf)
}

docker_cleanup() {
  docker kill "${container_id}"
}

if [[ "$#" -ne 2 ]] ; then
    echo "Usage: $0 quay.io/prometheus/mysqld-exporter:v0.10.0 9104" >&2
    exit 1
fi

docker_start
wait_start
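# Hypothetical probe examples (not part of this script; db1.example.com is a
# placeholder). With the exporter on its default :9104 and the
# test_exporter.cnf above, the multi-target /probe endpoint scrapes an
# arbitrary server with the default [client] credentials:
#   curl 'http://localhost:9104/probe?target=db1.example.com:3306'
# and auth_module selects an alternate credentials section such as [client.server1]:
#   curl 'http://localhost:9104/probe?target=db1.example.com:3306&auth_module=client.server1'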