pax_global_header00006660000000000000000000000064141732554000014513gustar00rootroot0000000000000052 comment=29118cc5dad686a9a4847bb9a50c1107eaefffa3 pgbadger-11.7/000077500000000000000000000000001417325540000132165ustar00rootroot00000000000000pgbadger-11.7/.editorconfig000066400000000000000000000004611417325540000156740ustar00rootroot00000000000000root = true [*] charset = utf-8 indent_style = tab indent_size = 8 # Unix-style newlines end_of_line = lf # Remove any whitespace characters preceding newline characters trim_trailing_whitespace = true # Newline ending every file insert_final_newline = true [*.yml] indent_style = space indent_size = 2 pgbadger-11.7/.gitignore000066400000000000000000000000611417325540000152030ustar00rootroot00000000000000blib/ Makefile MYMETA.json MYMETA.yml pm_to_blib pgbadger-11.7/CONTRIBUTING.md000066400000000000000000000020531417325540000154470ustar00rootroot00000000000000# How to contribute ## Before submitting an issue 1. Upgrade to the latest version of pgBadger and see if the problem remains 2. Look at the [closed issues](https://github.com/darold/pgbadger/issues?state=closed), we may have already answered a similar problem 3. [Read the doc](http://pgbadger.darold.net/documentation.html), it is short and useful ## Coding style The pgBadger project provides a [.editorconfig](http://editorconfig.org/) file to setup consistent spacing in files. Please follow it! ## Keep documentation updated The first pgBadger documentation is `pgbadger --help`. `--help` fills the SYNOPSIS section in `doc/pgBadger.pod`. The DESCRIPTION section *must* be written directly in `doc/pgBadger.pod`. `README` is the text formatting of `doc/pgBadger.pod`. After updating `doc/pdBadger.pod`, rebuild `README` and `README.md` with the following commands: ```shell $ perl Makefile.PL && make README ``` When you're done contributing to the docs, commit your changes. Note that you must have `pod2markdown` installed to generate `README.md`. pgbadger-11.7/ChangeLog000066400000000000000000004166771417325540000150150ustar00rootroot000000000000002022-01-23 - v11.7 This release of pgBadger fix some issues reported by users since past five months as well as some improvements: * Add new option --no-progressbar option to not display it but keep the other outputs. * Add new option --day-report that can be used to rebuild an HTML report over the specified day. Like option --month-report but only for a day. It requires the incremental output directories and the presence of all necessary binary data files. The value is date in format: YYYY-MM-DD * Improve parsing of Heroku logplex and cloudsql json logs. Here is the complete list of changes and acknowledgments: - Update contribution guidelines and Makefile.PL to improve consistency, clarity, and dependencies. Thanks to diffuse for the patch. - Fix use of last parse file (--last-parsed) with binary mode. Thanks to wibrt for the report. - Add regression test for --last-parsed use and fix regression test on report for temporary files only. - Fix title for session per host graph. Thanks to Norbert Bede for the report. - Fix week number when computing weeks reports when --iso-week-number and --incremental options was enabled. Thanks to hansgv for the report. - Add --no-progressbar option to not display it and keep the other outputs. Thanks to seidlmic for the feature request. - Prevent too much unknown format line prints in debug mode for multi-line jsonlog. - Fix parsing of single line cloudsql json log. Thanks to Thomas Leclaire for the report. - Fix temporary files summary with log_temp_files only. - Print debug message with -v even if -q or --quiet is used. - Fix autodetection of jsonlog file. - Fix parsing of cloudsql log file. Thanks to Luc Lamarle for the report. - Fixes pid extraction in parse_json_input. Thanks to Francois Scala for the patch. - Add new option --day-report with value as date in format: YYYY-MM-DD that can be used to rebuild an HTML report over the specified day. Thanks to Thomas Leclaire for the feature request. - Fix query counter in progress bar. Thanks to Guillaume Lelarge for the report. - Fix incomplete queries stored for top bind and prepare reports. - Fix normalization of object identifier, in some case the numbers was replaced by a ?. - Fix unformatted normalized queries when there is a comment at beginning. - Fix multi-line in stderr format when --dbname is used. Thanks to Guillaume Lelarge for the report. - Fix not generated reports in incremental mode when --dbname is used. Thanks to Dudley Perkins for the report. - Do not die anymore if a binary file is not compatible, switch to next file. Thanks to Thomas Leclaire for the suggestion. - Fix Heroku logplex format change in pgbadger parser. Thanks to François Pietka for the report. 2021-09-04 - v11.6 This release of pgBadger fix some issues reported by users since past seven months as well as some improvements: * Add detection of Query Id in log_line_prefix new in PG14. Thanks to Florent Jardin for the report. * Add advanced regression tests with db exclusion and the explode feature. Thanks to MigOps Inc for the patch. * Apply multiprocess to report generation when --explode is used. Thanks to MigOps Inc for the patch and Thomas Leclaire for the feature request. * Add --iso-week-number in incremental mode, calendar's weeks start on a Monday and respect the ISO 8601 week number, range 01 to 53, where week 1 is the first week that has at least 4 days in the new year. Thanks to Alex Muntada for the feature request. * Add command line option --keep-comments to not remove comments from normalized queries. It can be useful if you want to distinguish between same normalized queries. Thanks to Stefan Corneliu Petrea for the feature request. * Skip INFO lines introduced in PostgreSQL log file by third parties software. Thanks to David Piscitelli for the report. * Add compatibility with PostgresPro log file including rows number and size in bytes following the statement duration. Thanks to panatamann for the report. * Parse times with T's to allow using the timestamps from journalctl. Thanks to Graham Christensen for the patch. * Improve Windows port. Thanks to Bertrand Bourgier for the patches. Important note: * Expect that --iso-week-number will be the default in next major release and that --start-monday option will be removed as the week will always start a Monday. The possibility to have week reports start a Sunday will be removed to simplify the code. Here is the complete list of changes and acknowledgments: - Fix duplicate of warning message: "database ... must be vacuumed within ... transactions". Thank to Christophe Courtois for the report. - Fix use of uninitialized variable. Thanks to phiresky for the report. - Improve query id detection, it can be negative, as well as read it from csvlog. - Fix case where last file in incremental mode is always parsed even if it was already done. Thanks to Thomas Leclaire for the report. - Update syslog format regex to handle where session line indicator only contains one int vs two ints separated by dash. Thanks to Timothy Alexander for the patch. - Fix --exclude-db option to create anyway the related report with json log. Thanks to MigOps Inc for the patch and Thomas Leclaire for the report. - Add regression test about Storable buggy version. - Fix use of uninitialized value in substitution iterator in incremental mode during the week report generation. Thanks to Thomas Leclaire, Michael Vitale, Sumeet Shukla and Stefan Corneliu Petrea for the report. - Add 'g' option to replace all bind parameters. Thanks to Nicolas Lutic and Sebastien Lardiere for the patch. - Documentation improvements. Thanks to Stefan Petrea for the patch. - Fixes change log time zone calculation. Thanks to Stefan Petrea for the patch. - Fix log filter by begin/end time. - Fix wrong association of orphan lines for multi-line queries with a filter on database. Thanks to Abhishek Mehta for the report. - Fix reports in incremental mode when --dbname parameter is partially ignored with "explode" option (-E). Thanks to lrevest for the report. - Update javascript resources. - Fix display of menu before switching to hamburger mode when screen is reduced. Thanks to Guillaume Lelarge for the report. - Fix bind parameters values over multiple lines in the log that were not well supported. - Apply same fix for previous patch than in pgFormatter. - Fix an other use of uninitialized value in substitution iterator from pgFormatter code. Thanks to Christophe Courtois for the report. - Fix query normalization. Thanks to Jeffrey Beale for the patch. - Be sure that all statements end with a semicolon when --dump-all-queries is used. Thanks to Christian for the report. - Fix typo and init of EOL type with multiple log files. - Add auto detection of EOL type to fix LAST_PARSED offset when OEL is on 2 bytes (Windows case). Thanks to Bertrand Bourgier for the patch. - Fix get_day_of_week() port on Windows where strftime %u is not supported. Thanks to Bertrand Bourgier for the patch. - Fix Windows port that call pl2bat.bat perl utility to create a corrupted pgbadger.bat du to the way __DATA__ was read in pgbadger. Thanks to Bertrand Bourgier for the patch. - Fix begin/end time filter and add regression test for timestamp filters. Thanks to Alexis Lahouze and plmayekar for the report. - Fix use of uninitialized value in pattern match introduced by pgFormatter update. Thanks to arlt for the report. 2021-02-18 - v11.5 This release of pgBadger fix some issues reported by users since past three months as well as some improvements: * Add report about sessions idle time, computed using: "total sessions time - total queries time / number of sessions This require that log_connection and log disconnection have been enabled and that log_min_duration_statement = 0 (all queries logged) to have a reliable value. This can help to know how much idle time is lost, and if a pooler transaction mode would be useful. This report is available in the "Sessions" tab of "Global Stats" and in the "Sessions" tab of "General Activity" reports (per hour). * Add anonymization of numeric values, replaced by 4 random digits. * Update SQL beautifier based on pgFormatter 5.0. Here is the complete list of changes and acknowledgments: - Fix parsing of cloudsql multi-line statement. Thanks to Jon Young for the report. - Add regression test for anonymization. - Fix anonymization broken by maxlength truncate. Thanks to artl for the report. - Add anonymization of parameter in time consuming prepare and bind reports. Thanks to arlt for the report. - Add support to microseconds in logplex log line prefix. Thanks to Ross Gardiner for the report. - Add report about sessions idle time. Thanks to Guillaume Lelarge for the feature request. - Complete patch to support multi-line in jsonlog format. 2020-11-24 - v11.4 This release of pgBadger fix some issues reported by users since past four months. Improve support for PostgreSQL 13 log information and adds some new features: * Add full autovacuum information in "Vacuums per table" report for buffer usage (hits, missed, dirtied), skipped due to pins, skipped frozen and WAL usage (records, full page images, bytes). In report "Tuples removed per table" additional autovacuum information are tuples remaining, tuples not yet removable and pages remaining. These information are only available on the "Table" tab. * Add new repartition report about checkpoint starting causes. * Add detection of application name from connection authorized traces. Here is the complete list of changes and acknowledgments: - Fix typo in an error message. Thanks to Vidar Tyldum for the patch. - Fix Windows port with error: "can not load incompatible binary data". Thanks to Eric Brawner for the report. - Fix typo on option --html-outdir in pgbadger usage and documentation. Thanks to Vidar Tyldum for the patch. - Fix autodetection of jsonlog/cloudsql format. Thanks to Jon Young for the report. - Fix CSV log parsing with PG v13. Thanks to Kanwei Li for the report and Kaarel Moppel for the patch. - Fix sort of queries generating the most temporary files report. Thanks to Sebastien Lardiere for the report. - Add pgbadger version trace in debug mode. 2020-07-26 - v11.3 This release of pgBadger fix several issues reported by users since past four months. It also adds some new features and new command line options: * Add autodetection of UTC timestamp to avoid applying timezone for graphs. * Add support to GCP CloudSQL json log format. * Add new option --dump-all-queries to use pgBadger to dump all queries to a text file, no report is generated just the full list of statements found in the PostgreSQL log. Bind parameters are inserted into the queries at their respective position. * Add new option -Q | --query-numbering used to add numbering of queries to the output when using options --dump-all-queries or --normalized-only. * Add new command line option --tempdir to set the directory where temporary files will be written. Can be useful on system that do not allow writing to /tmp. * Add command line option --ssh-port used to set the ssh port if not default to 22. The URI notation also adds support to ssh port specification by using the form: ssh://192.168.1.100:2222//var/log/postgresql-11.log Here is the complete list of changes and acknowledgments: - Fix incremental reports for jsonlog/cloudsql log format. Thanks to Ryan DeShone for the report - Add autodetection of UTC timestamp to avoid applying autodetected timezone for graphs. With UTC time the javascript will apply the local timezone. Thanks to Brett Stauner for the report. - Fix incremental parsing of journalctl logs doesn't work from the second run. Thanks to Paweł Koziol for the patch. - Fix path to resources file when -X and -E are used. Thanks to Ryan DeShone for the report. - Fix General Activity report about read/write queries. Thanks to alexandre-sk5 for the report. - Add debug message when parallel mode is not use. - Fix elsif logic in file size detection and extra space introduced in the journalctl command when the --since option is added. Thanks to Pawel Koziol for the patch. - Fix "not a valid file descriptor" error. Thanks to Pawel Koziol for the report. - Fix incremental mode with RDS files. Thanks to Ildefonso Camargo, nodje and John Walsh for the report. - Add new option -Q | --query-numbering used to add numbering of queries to the output when using options --dump-all-queries or --normalized-only. This can be useful to extract multiline queries in the output file from an external script. Thanks to Shantanu Oak for the feature request. - Fix parsing of cloudsql json logs when log_min_duration_statement is enabled. Thanks to alexandre-sk5 for the report. - Fix wrong hash key for users in RDS log. Thanks to vosmax for the report. - Fix error related to modification of non-creatable array value. Thanks to John Walsh and Mark Fletcher for the report. - Add support to GCP CloudSQL json log format, log format (-f) is jsonlog. Thanks to Thomas Poindessous for the feature request. - Add new option --dump-all-queries to use pgBadger to dump all queries to a text file, no report is generated just the full list of statements found in the PostgreSQL log. Bind parameters are inserted into the queries at their respective position. There is not sort on unique queries, all queries are logged. Thanks to Shantanu Oak for the feature request. - Add documentation for --dump-all-queries option. - Fix vacuum report for new PG version. Thanks to Alexey Timanovsky for the report. - Add new command line option --no-process-info to disable change of process title to help identify pgbadger process, some system do not allow it. Thanks to Akshay2378 for the report. - Add new command line option --tempdir to set the directory where temporary files will be written. Default: File::Spec->tmpdir() || '/tmp' Can be useful on system that do not allow writing to /tmp. Thanks to Akshay2378 for the report. - Fix unsupported compressed filenames with spaces and/or brackets. Thanks to Alexey Timanovsky for the report. - Add command line option --ssh-port used to set the ssh port if not default to 22. The URI notation also adds support to ssh port specification by using the form: ssh://192.168.1.100:2222//var/log/postgresql-11.log Thanks to Augusto Murri for the feature request. 2020-03-11 - v11.2 This release of pgBadger fix several issues reported by users since past six months. It also adds some new features: * Add support and autodetection of AWS redshift log format. * Add support to pgbouncer 1.11 new log format. * Handle zstd and lz4 compression format * Allow to fully separate statistics build and HTML report build in incremental mode without having to read a log file. For example it is possible to run pgbadger each hours as follow: pgbadger -I -O "/out-dir/data" --noreport /var/log/postgresql*.log It just creates the data binary files in "/out-dir/data" then for example you can make reports each night for the next day in a separate directory `/out-dir/reports`: pgbadger -I -l "/out-dir/data/LAST_PARSED" -H "/out-dir/reports" /out-dir/data/2020/02/19/*.bin This require to set the path to the last parsed information, the path where HTML reports will be written and the binary data file of the day. There is also new command line options: * Add new command line option --explain-url used to override the url of the graphical explain tool. Default URL is: http://explain.depesz.com/?is_public=0&is_anon=0&plan= If you want to use a local install of PgExplain or an other tool. pgBadger will add the plan in text format escaped at the end of the URL. * Add new option --no-week to instruct pgbadger to not build weekly reports in incremental mode. Useful if it takes too much time and resources. * Add new command line option --command to be able to set a command that pgBadger will execute to retrieve log entries on stdin. pgBadger will open a pipe to the command and parse log entries generated by the command. For example: pgbadger -f stderr --command 'cat /var/log/postgresql.log' which is the same as executing pgbadger with the log file directly as argument. The interest of this option is obvious if you have to modify the log file on the fly or that log entries are extracted from a program or generated from a database. For example: pgbadger -f csv --command 'psql dbname -c "COPY jrn_log TO STDOUT (FORMAT CSV)"' * Add new command line option --noexplain to prevent pgBadger to parse and report explain plan written to log by auto_explain extension. This is useful if you have a PostgreSQL version < 9.0 where pgBadger generate broken reports when there is explain plan in log. Backward compatibility: - By default pgBadger will truncate queries up to 100000 characters. This arbitrary value and can be adjusted using option --maxlength. Previous behavior was to not truncate queries but this could lead in excessive resources usage. Limiting default size is safer and the size limit might allow no truncate in most cases. However queries will not be beautified if they exceed 25000 characters. Here is the complete list of changes and acknowledgments: - Fix non working --exclude-client option. Thanks to John Walsh for the report. - Add regression test for RDS log parsing and --exclude-client. - Fix progress bar for pgbouncer log file. The "queries" label is changed in "stats" for pgbouncer log files. - Add command line option --explain-url used to override the url of the graphical explain tool. Thanks to Christophe Courtois for the feature request. - Add support to pgbouncer 1.11 new log format. Thanks to Dan Aksenov for the report. - Handle zstd and lz4 compression format. Thanks to Adrien Nayrat for the patch. - Add support and autodetection of AWS redshift log format. Thanks to Bhuvanesh for the reature request. - Update documentation about redshift log format. - Add new option --no-week to instruct pgbadger to not build weekly reports in incremental mode. Thanks to cleverKermit17 for the feature request. - Fix a pattern match on file path that breaks pgBadger on Windows. - Fix #554 about cyrillic and other encoded statement parameters that was not reported properly in the HTML report even with custom charset. The regression was introduced with a fix to the well known Perl error message "Wide character in print". The patch have been reverted and a new command line option: --wide-char is available to recover this behavior. Add this option to your pgbadger command if you have message "Wide character in print". Add a regression test with Cyrillic and french encoding. Thanks to 4815162342lost and yethee for the report. - Update documentation to inform that lc_messages = 'en_US.UTF-8' is valid too. Thanks to nodje for the report. - Update documentation about --maxlength which default truncate size is 100000 and no more default to no truncate. Thanks to nodje for the report. - Fix retention calculation at year overlap. Thanks to Fabio Pereira for the patch. - Fix parsing of rds log file format. Thanks to Kadaffy Talavera for the report. - Prevent generating empty index file in incremental mode when there is no new log entries. Thanks to Kadaffy Talavera for the report. - Fix non up to date documentation. Thanks to Eric Hanson for the patch. - Fixes the command line parameter from -no-explain to -noexplain. Thanks to Indrek Toom for the patch. - Fall back to default file size when totalsize can not be found. Thanks to Adrien Nayrat for the patch. - Fix some dates in examples. Thanks to Greg Clough for the patch. - Use compressed file extension regexp in remaining test and extract .bin extension in a separate condition. - Handle zstd and lz4 compression format. Thanks to Adrien Nayrat for the patch. - Fix remaining call of SIGUSR2 on Windows. Thanks to inrap for the report. - Fix progress bar with log file of indetermined size. - Add new command line option --command to be able to set a command that pgBadger will execute to retrieve log entries on stdin. Thanks to Justin Pryzby for the feature request. - Add new command line option --noexplain to prevent pgBadger to parse and report explain plan written to log by auto_explain extension. This is useful if you have a PostgreSQL version < 9.0 where pgBadger generate broken reports when there is explain plan in log. Thanks to Massimo Sala for the feature request. - Fix RDS log parsing when the prefix is set at command line. Thanks to Bing Zhao for the report. - Fix incremental mode with rds log format. Thanks to Bing Zhao for the report. - Fix possible rds log parsing. Thanks to James van Lommel and Simon Dobner for the report. - Fix statement classification and add regression test. Thanks to alexanderlaw for the report. - Fix anonymization of single characters in IN clause. Thanks to Massimo Sala for the report. - Fix RDS log parsing for rows without client/user/db information. Thanks to Konrad for the report. 2019-09-16 - v11.1 This release of pgBadger fix several issues reported by users since three months. It also adds some new features and reports: - Add report of top N queries that consume the most time in the prepare or parse stage. - Add report of top N queries that consume the most time in the bind stage. - Add report of timing for prepare/bind/execute queries parts. Reported in a new "Duration" tab in Global Stats report. Example: Total query duration: 6m16s Prepare/parse total duration: 45s564ms Bind total duration: 4m46s Execute total duration: 44s71m This also fix previous report of "Total query duration" that was only reporting execute total duration. - Add support to RDS and CloudWatch log format, they are detected automatically. You can use -f rds if pgbadger is not able to auto-detect the log format. - Add new configuration option --month-report to be able to build monthly incremental reports. - Restore support to Windows operating system. There's also some bugs fixes and features enhancements. - Add auto-generated Markdown documentation in README.md using tool pod2markdown. If the command is not present the file will just not be generated. Thanks to Derek Yang for the patch. - Translate action WITH into CTE, regression introduced in last release. - Fix support of Windows Operating System - Add support to RDS and CloudWatch log format, use -f rds if pgbadger is not able to auto-detect this log format. Thanks to peruuparkar for the feature request. - Fix option -f | --format that was not applied on all files get from the parameter list where log format auto-detection was failing, the format was taken from the fist file parsed. Thanks to Levente Birta for the report. - Update source documentation file to replace reference to pgBadger v7.x with v11. Thanks to Will Buckner for the patch. - Limit height display size of top queries to avoid taking the whole page with huge queries. Thanks to ilias ilisepe1 for the patch. - Fix overflow of queries and detail in Slowest individual queries. - Fix SSH URIs for files, directories and wildcards. Thanks to tbussmann for the patch. - Fix URI samples in documentation. Thanks to tbussmann for the patch. - Hide message of use of default out file when --rebuild is used. - Add extra newline to usage() output to not bread POD documentation at make time. - Reapply --exclude-client option description in documentation. Thanks to Christoph Berg for the report. 2019-06-25 - v11.0 This release of pgBadger adds some major new features and fixes some issues reported by users since the last four months. New features: - Regroup cursor related query (DECLARE,CLOSE,FETCH,MOVE) into new query type CURSOR. - Add top bind queries that generate the more temporary files. Require log_connection and log_disconnection be activated. - Add --exclude-client command line option to be able to exclude log entries for the specified client ip. Can be used multiple time. - Allow to use time only in --begin and --end filters. - Add -H, --html-dir option to be able to set a different path where HTML report must be written in incremental mode. Binary files stay on directory defined with -O, --outdir option. - Add -E | --explode option to explode the main report into one report per database. Global information not related to a database are added to the postgres database report. - Add per database report to incremental mode. In this mode there will be a sub directory per database with dedicated incremental reports. - Add support to Heroku's PostgreSQL logplex format. Log can be parsed using: heroku logs -p postgres | pgbadger -f logplex -o heroku.html - - When a query is > 10Kb we first limit size of all constant string parameters to 30 characters and then the query is truncated to 10Kb. This prevent pgbadger to waste time/hang with very long queries when inserting bytea for example. The 10Kb limit can be controlled with the --maxlength command line parameter. The query is normalized or truncated to maxlength value only after this first attempt to limit size. This new release breaks backward compatibility with old binary or JSON files. This also mean that incremental mode will not be able to read old binary file. If you want to update pgBadger and keep you old reports take care to upgrade at start of a new week otherwise weekly report will be broken. pgBadger will print a warning and just skip the old binary file. There's also some bugs fixes and features enhancements. - Add a warning about version and skip loading incompatible binary file. - Update code formatter to pgFormatter 4.0. - Fix pgbadger hang on Windows OS. Thanks to JMLessard for the report. - Update tools/pgbadger_tools script to be compatible with new binary file format in pgBadger v11. - Add top bind queries that generate the more temporary files. This collect is possible only if log_connection and log_disconnection are activated in postgresql.conf. Thanks to Ildefonso Camargo for the feature request. - Fix auto detection of timezone. Thanks to massimosala for the fix. - Remove some remaining graph when --nograph is used - Force use of .txt extension when --normalized-only is used. - Fix report of auto vacuum/analyze in logplex format. Thanks to Konrad zichul for the report. - Fix use of progress bar on Windows operating system. Thanks to JMLessard for the report. - Use a `$prefix_vars{'t_time'} to store the log time. Thanks to Luca Ferrari for the patch. - Update usage and documentation to remove perl command from pgbadger invocations. Thanks to Luca Ferrari for the patch. - Use begin and end with times without date. Thanks to Luca Ferrari for the patch. - Added some very minor spelling and grammar fixes to the readme file. Thanks to ofni yratilim for the patch. - Fix remote paths using SSH. Thanks to Luca Ferrari for the patch. - Update regression test to works with new structure introduced with the per database report feature. - Fix fractional seconds in all begin and end parameters. Thanks to Luca Ferrari for the patch. - Fix documentation URL. Thanks to Kara Mansel for the report. - Fix parsing of auto_explain. Add more information about -U option that can be used multiple time. Thanks to Douglas J Hunley for the report. - Lot of HTML / CSS report improvements. Thanks to Pierre Giraud for the patches. - Update resource file. - Add regression test for logplex format. - Add support to Heroku's PostgreSQL logplex format. You should be able to parse these logs as follow: heroku logs -p postgres | pgbadger -f logplex -o heroku.html - or if you have already saved the output to a file: pgbadger heroku.log The logplex format is auto-dectected like any other supported format. pgBadger understand the following default log_line_prefix: database = %d connection_source = %r sql_error_code = %e or simply: sql_error_code = %e Let me know if there's any other default log_line_prefix. The prefix can always be set using the -p | --prefix pgbadger option: pgbadger --p 'base = %d source = %r sql_state = %e' heroku.log for example. Thanks to Anthony Sosso for the feature request. - Fix pgbadger help on URI use. - Fix broken wildcard use in ssh URI introduced in previous patch. Thanks to Tobias Bussmann for the report. - Allow URI with space in path to log file. Thanks to Tobias Bussmann for the report. - Fix URI samples in documentation. Thanks to Tobias Bussmann for the patch. - Fix t/02_basics.t to don't fail if syslog test takes more than 10s. Thanks to Christoph Berg for the patch. 2019-02-14 - v10.3 This release of pgBadger is a maintenance release that fixes some log format autodetection issues another pgBouncer log parsing issue reported by users. There is also a new feature: The -o | --outfile option can now be used multiple time to dump output in several format in a single command. For example: pgbadger -o out.html -o out.json /log/pgsql-11.log will create two reports in html and json format saved in the two corresponding files. There's also some bugs fixes and features enhancements. - Fix statistics reports when there a filter on database, user, client or application is requested. Some queries was not reported. - Fix autodetection of pg>=10 defauilt log line prefix. - Fix autodetection of log file with "non standard" log line prefix. If --prefix specify %t, %m, %n and %p or %c, set format to stderr. Thanks to Alex Danvy for the report. - Remove extra space at end of line. - Add minimal test to syslog parser. - Fix a call to autodetect_format(). - Truncate statement when maxlength is used. Thanks to Thibaud Madelaine for the patch. - Add test for multiple output format. - The -o | --outfile option can now be used multiple time to dump output in several format in a single command. For example: pgbadger -o out.txt -o out.html -o - -x json /log/pgsql-11.log Here pgbadger will create two reports in text and html format saved in the two corresponding file. It will also output a json report on standard output. Thanks to Nikolay for the feature request. - Move detection of output format and setting of out filename into a dedicated function set_output_extension(). - Fix another pgBouncer log parsing issue. Thanks to Douglas J. Hunley for the report. 2018-12-27 - v10.2 This release of pgBadger is a maintenance release that fixes issues reported by users during last three months. There is also some new features: * Add support to pgbouncer 1.8 Stats log format. * Auto adjust javascript graph timezone. There is a new command line option: * Add --exclude-db option to compute report about everything except the specified database. * Add support to http or ftp remote PostgreSQL log file download. The log file is parsed during the download using curl command and never saved to disk. With ssh remote log parsing you can use uri as command line argument to specify the PostgreSQL log file. ssh://localhost/postgresql-10-main.log http://localhost/postgresql-10-main.log.gz ftp://localhost/postgresql-10-main.log with http and ftp protocol you need to specify the log file format at end of the uri: http://localhost/postgresql-10-main.log:stderr You can specify multiple uri for log files to be parsed. This is useful when you have pgbouncer log file on a remote host and PostgreSQL logs in the local host. With ssh protocol you can use wild card too like with remote mode, ex: ssh://localhost/postgresql-10-main.log* Old syntax to parse remote log file using -r option is still working but is obsolete and might be removed in future versions. There's also some bugs fixes and features enhancements. - Adjust end of progress bar with files with estimate size (bz2 compressed files and remote compressed files. - Update year in copyright. - Add information about URI notation to parse remote log files. - Force progress to reach 100% at end of parsing of compressed remote file. - Extract information about PL/pgSQL function call in queries of temporary file reports. The information is append to the details display block. - Fix progress bar with csv files. - Fix reading binary file as input file instead of log file. - Encode html output of queries into UTF8 to avoid message "Wide character in print". Thanks to Colin 't Hart for the report. - Add Checkpoints distance key/value for distance peak. - Fix pgbouncer parsing and request throughput reports. Thanks to Levente Birta for the report. - Fix use of csvlog instead of csv for input format. - Add support to pgbouncer 1.8 Stats log format. Thanks to Levente Birta for the report. - Add warning about parallel processing disabled with csvlog. Thanks to cstdenis for the report. - Add information in usage output about single process forcing with csvlog format in -j and -J options. Thanks to cstdenis for the report. - Fix unknown line format error for multi line log while incremental analysis over ssh. Thanks to Wooyoung Cho for the report. - Add -k (--insecure) option to curl command to be able to download logs from server using a self signed certificate. - Auto adjust javascript graph timezone. Thanks to Massimino Sala for the feature request. - Add support to HTTP logfile download by pgBadger, for example: /usr/bin/pgbadger http://www.mydom.com/postgresql-10.log - Will parse the file during download using curl command. - Fix documentation. Thanks to 0xflotus for the patch. - Reapply fix on missing replacement of bind parameters after some extra code cleaning. Thanks to Bernhard J. M. Grun for the report. - Add --exclude-db option to compute report about everything except the specified database. The option can be used multiple time. 2018-09-12 - v10.1 This release of pgBadger is a maintenance release that fixes reports in incremental mode and multiprocess with -j option. Log parsing from standard input was also broken. If you are using v10.0 please upgrade now. - Add test on pgbouncer log parser. - Some little performances improvment. - Fix not a valid file descriptor at pgbadger line 12314. - Fix unwanted newline in progressbar at startup. - Remove circleci files from the project. - Remove dependency of bats and jq for the test suite, they are replaced with Test::Simple and JSON::XS. - Add more tests especially for incremental mode and input from stdin that was broken in release 10.0. - Sync pgbadger, pod, and README, and fix some syntax errors. Thanks to Christoph Berg for the patch. - Add documentation on how to install Perl module JSON::XS from apt and yum repositories. - Fix URI for CSS in incremental mode. Thanks to Floris van Nee for the report. - Fix fatal error when looking for log from STDIN. Thanks to Jacek Szpot for the report. - Fixes SED use for OSX builds. Thanks to Steve Newson for the patch. - Fix illegal division by zero in incrental mode. Thanks to aleszeleny for the report. - Replace SQL::Beautify with v3.1 of pgFormatter::Beautify. 2018-09-09 - v10.0 This release of pgBadger is a major release that adds some new features and fix all issues reported by users since last release. * Add support of pgbouncer syslog log file format. * Add support to all auto_explain format (text, xml, json and yaml). * Add support to %q placeholder in log_line_prefix. * Add jsonlog format of Michael Paquier extension, with -f jsonlog pgbadger will be able to parse the log. * Replace the SQL formatter/beautify with v3.0 of pgFormatter. There is some new command line option: - Add --prettify-json command line option to prettify JSON output. - Add --log-timezone +/-XX command line option to set the number of hours from GMT of the timezone that must be used to adjust date/time read from log file before beeing parsed. Note that you might still need to adjust the graph timezone using -Z when the client has not the same timezone. - Add --include-time option to add the ability to choose times that you want to see, instead of excluding all the times you do not want to see (--exclude-time). The pgBadger project and copyrights has been transfered from Dalibo to the author and official maintainer of the project. Please update your links: - Web site: http://pgbadger.darold.net/ - Source code: https://github.com/darold/pgbadger I want to thanks the great guys at Dalibo for all their investments into pgBadger during these years and especially Damien Clochard and Jean-paul argudo for their help to promote pgBadger. - Fix checkpoint distance and estimate not reported in incremental mode. Thanks to aleszeleny for the report. - Fix title of pgbouncer simultaneous session report. Thansks to Jehan Guillaume De Rorthais for the report. - Add support of pgbouncer syslog log file format. Thanks to djester for the feature request. - Fix error when a remote log is empty. Thanks to Parasit Hendersson for the report. - Fix test with binary format. Binary file must be generated as it is dependent of the plateform. Thanks to Michal Nowak for the report. - Fix case where an empty explain plan is generated. - Fix parsing of autodetected default format with a prefix in command line. - Remove dependency of git command in Makefile.PL. - Update documentation about options changes and remove of the [%l-1] part of the mandatory prefix. - Fix parsing of vacuum / analyze system usage for PostgreSQL 10. Thanks to Achilleas Mantzios for the patch. - Fix Temporary File Activity table. - Remove dependency to git during install. - Add --log-timezone +/-XX command line option to set the number of hours from GMT of the timezone that must be used to adjust date/time read from log file before beeing parsed. Using this option make more difficult log search with a date/time because the time will not be the same in the log. Note that you might still need to adjust the graph timezone using -Z when the client has not the same timezone. Thanks to xdexter for the feature request and Julien Tachoire for the patch. - Add support to auto_explain json output format. Thanks to dmius for the report. - Fix auto_explain parser and queries that was counted twice. Thanks to zam6ak for the report. - Fix checkpoint regex to match PostgreSQL 10 log messages. Thanks to Edmund Horner for the patch. - Update description of -f | --format option by adding information about jsonlog format. - Fix query normalisation to not duplicate with bind queries. Normalisation of values are now tranformed into a single ? and no more 0 for numbers, two single quote for string. Thanks to vadv for the report. - Fix log level count. Thanks to Jean-Christophe Arnu for the report - Make pgbadger more compliant with B::Lint bare sub name. - Made perlcritic happy. - Add --prettify-json command line option to prettify JSON output. Default output is all in single line. - Fix Events distribution report. - Fix bug with --prefix when log_line_prefix contain multiple %%. Thanks to svb007 for the report. - Add --log-timezone +/-XX command line option to set the number of hours from GMT of the timezone that must be used to adjust date/time read from log file before beeing parsed. Using this option make more difficult log search with a date/time because the time will not be the same in the log. Note that you might still need to adjust the graph timezone using -Z when the client has not the same timezone. Thanks to xdexter for the feature request. - Remove INDEXES from the keyword list and add BUFFERS to this list. - Fix normalization of query using cursors. - Remove Dockerfile and documentation about docker run. pgBadger comes as a single Perl script without any dependence and it can be used on any plateform. It is a non sens to use docker to run pgbadger, if you don't want to install anything, just copy the file pgbadger where you want and execute it. - Fix broken grid when no temp files activity. Thanks to Pierre Giraud for the patch - Add doc warning about log_in_duration_statement vs log_duration + log_statement. Thanks to Julien Tachoire for the patch. - Apply timezone offset to bar charts. Thanks to Julien Tachoire for the patch. - Delete current temp file info if we meet an error for the same PID Thanks to Julien Tachoire for the patch. - Consistently use app= in examples, and support appname= Some of the usage examples used appname= in the prefix, but the code didn't recognize that token. Use app= in all examples, and add appname= to the prefix parser. Thanks to Christoph Berg for the patch - Fix wrong long name for option -J that should be --Jobs intead of --job_per_file. Thanks to Chad Trabant for the report and Etienne Bersac for the patch. - Ignore blib files. Thanks to Etienne Bersac for the patch. - Add consistency tests. Thanks to damien clochard for the patch. - doc update : stderr is not a default for -f. Thanks to Christophe Courtois for the patch. - Always update pod and README. Thanks to Etienne Bersac for the patch. - Add some regression tests. Thanks to Etienne Bersac for the patch. - Add editorconfig configuration. Thanks to Etienne Bersac for the patch. - Drop vi temp files from gitignore. Thanks to Etienne Bersac for the patch. - Add --include-time option to add the ability to choose times that you want to see, instead of excluding all the times you do not want to see. This is handy when wanting to view only one or two days from a week's worth of logs (simplifies down from multiple --exlucde-time options to one --include-time). Thanks to Wesley Bowman for the patch. - Check pod syntax. Thanks to Etienne Bersac for the patch. - Add HACKING to document tests. Thanks to Etienne Bersac for the patch. - Drop obsolete --bar-graph option. Thanks to Etienne Bersac for the patch. - Drop misleading .perltidyrc. This file date from 2012 and pgbadger code is far from compliant. perltidy unified diff is 10k lines. Let's drop this. Thanks to Etienne Bersac for the patch. - Fix use of uninitialized value in SQL formatting. Thanks to John Krugger for the report and Jean-paul Argudo for the report. 2017-07-27 - v9.2 This release of pgBadger is a maintenance release that adds some new features. * Add report of checkpoint distance and estimate. * Add support of AWS Redshift keywords to SQL code beautifier. * Add autodetection of log format in remote mode to allow remote parsing of pgbouncer log file together with PostgreSQL log file. There's also some bugs fixes and features enhancements. - Fix reports with histogram that was not showing data upper than the last range. - Fix parsing of journalctl without the the log line number pattern ([%l-n]). Thanks to Christian Schmitt for the report. - Add report of checkpoint distance and estimate. Thanks to jjsantam for the feature request. - Append more information on what is done by script to update CSS and javascript files, tools/updt_embedded_rsc.pl. - Do not warn when all log files are empty and exit with code 0. - Fix build_log_line_prefix_regex() that does not include %n as a lookup in %regex_map. Thanks to ghosthound for the patch. - Change error level of "FATAL: cannot use CSV" to WARNING. Thanks to kong1man for the report. - Fix use of uninitialized value warning. Thanks to Payal for the report. - Add permission denied to error normalization - Update pgbadger to latest commit 5bdc018 of pgFormatter. - Add support for AWS Redshift keywords. Thanks to cavanaug for the feature request. - Fix missing query in temporary file report when the query was canceled. Thanks to Fabrizio de Royes Mello for the report. - Normalize query with binded parameters, replaced with a ?. - Sanity check to avoid end time before start time. Thanks to Christophe Courtois for the patch. - Fix a lot of mystyped words and do some grammatical fixes. Use 'pgBadger' where it refers to the program and not the binary file. Also, use "official" expressions such as PgBouncer, GitHub, and CSS. POD file was synced with README. Thanks to Euler Taveira for the patch. - Menu is broken when --disable-type top_cancelled_info test and closing list must be inside disable_type test. While in it, ident disable_lock test. Thanks to Euler Taveira for the patch. - Fix use of uninitialized value. Thanks to johnkrugger for the report. - Remove test to read log file during log format auto-detection when the file is hosted remotly. Thanks to clomdd for the report. - Add autodetection of log format in remote mode to allow remote parsing of pgbouncer log file together with PostgreSQL log file. - Fix number of sessions wrongly increased after log line validation Thanks to Achilleas Mantzios for the report. - Minor reformatting of the pgBadger Description. - Fix repeated info in documentation. Thanks to cscatolini for the patch. 2017-01-24 - v9.1 This release of pgBadger is a maintenance release that adds some new features. * Add report of error class distribution when SQLState is available in the log_line_prefix (see %e placeholder). * Update SQL Beautifier to pgFormatter v1.6 code. * Improve error message normalization. * Add --normalized-only option to generate a text file containing all normalized queries found in a log with count. * Allow %c (session id) to replace %p (pid) as unique session id. * Add waiting for lock messages to event reports. * Add --start-monday option to start calendar weeks in Monday instead of default to Sunday. There's also some bugs fixes and features enhancements. - Add report of error class distribution when SQLState is available in the log line prefix. Thanks to jacks33 for the feature request. - Fix incremental global index on resize. Thanks to clomdd for the report. - Fix command tag log_line_prefix placeholder %i to allow space character. - Fix --exclude-line options and removing of obsolete directory when retention is enabled and --noreport is used. - Fix typo in "vacuum activity table". Thanks to Nicolas Gollet for the patch. - Fix autovacuum report. Thanks to Nicolas Gollet for the patch. - Fix author of pgbadger's logo - Damien Cazeils and English in comments. Thanks to Thibaut Madelaine for the patch. - Add information about pgbouncer log format in the -f option. Thanks to clomdd for the report. - Add --normalized-only information in documentation. - Fix broken report of date-time introduced in previous patch. - Fix duration/query association when log_duration=on and log_statement=all. Thanks to Eric Jensen for the report. - Fix normalization of messages about advisory lock. Thanks to Thibaut Madelaine for the report. - Fix report of auto_explain output. Thanks to fch77700 for the report. - Fix unwanted log format auto detection with log entry from stdin. Thanks to Jesus Adolfo Parra for the report. - Add left open parentheses to the "stop" chars of regex to look for db client in the prefix to handle the PostgreSQL client string format that includes source port. Thanks to Jon Nelson for the patch. - Fix some spelling errors. Thanks to Jon Nelson for the patch. - Allow %c (session id) to replace %p (pid) as unique session id. Thanks to Jerryliuk for the report. - Allow pgbadger to parse default log_line_prefix that will be probably used in 10.0: '%m [%p] ' - Fix missing first line with interpreter call. - Fix missing Avg values in CSV report. Thanks to Yosuke Tomita for the report. - Fix error message in autodetect_format() method. - Add --start-monday option to start calendar weeks in Monday instead of default to Sunday. Thanks to Joosep Mae for the feature request. - Fix --histo-average option. Thanks to Yves Martin for the report. - Remove plural form of --ssh-option in documentation. Thanks to mark-a-s for the report. - Fix --exclude-time filter and rewrite code to skip unwanted line as well code to update the progress bar. Thanks to Michael Chesterton for the report. - Fix support to %r placeholder in prefix instead of %h. 2016-09-02 - v9.0 This major release of pgBadger is a port to bootstrap 3 and a version upgrade of all resources files (CSS and Javascript). There's also some bugs fixes and features enhancements. Backward compatibility with old incremental report might be preserved. - Sources and licences of resources files are now on a dedicated subdirectory. A script to update their minified version embedded in pgbager script has been added. Thanks to Christoph Berg for the help and feature request. - Try to detect user/database/host from connection strings if log_connection is enabled and log_line_prefix doesn't include them. Extend the regex to autodetect database name, user name, client ip address and application name. The regex now are the following: db => qr/(?:db|database)=([^,]*)/; user => qr/(?:user|usr)=([^,]*)/; client => qr/(?:client|remote|ip|host)=([^,]*)/; appname => qr/(?:app|application)=([^,]*)/; - Add backward compatibility with older version of pgbadger in incremental mode by creating a subdirectory for new CSS and Javascript files. This subdirectory is named with the major version number of pgbadger. - Increase the size of the pgbadger logo that appears too small with the new font size. - Normalize detailed information in all reports. - Fix duplicate copy icon in locks report. - Fix missing chart on histogram of session time. Thanks to Guillaume Lelarge for the report. - Add LICENSE file noting the licenses used by the resource files. Thanks to Christoph Berg for the patch. - Add patch to jqplot library to fix an infinite loop when trying to download some charts. Thanks to Julien Tachoires for the help to solve this issue. - Script tools/updt_embedded_rsc.pl will apply the patch to resource file resources/jquery.jqplot.js and doesn't complain if it has already been applied. - Remove single last comma at end of pie chart dataset. Thanks to Julien Tachoires for the report. - Change display of normalized error - Remove unused or auto-generated files - Update all resources files (js+css) and create a directory to include source of javascript libraries used in pgbadger. There is also a new script tools/updt_embedded_rsc.pl the can be used to generate the minified version of those files and embedded them into pgbadger. This script will also embedded the FontAwesome.otf open truetype font into the fontawesome.css file. 2016-08-27 - v8.3 This is a maintenance release that fix some minor bugs. This release also adds replication command messages statistics to the Events reports. - Fix auto-detection of stderr format with timestamp as epoch (%n). - Fix histogram over multiples days to be cumulative per hour, not an average of the number of event per day. - Fix parsing of remote file that was failing when the file does not exists locally. Thanks to clomdd for the report. - Detect timezones like GMT+3 on CSV logs. Thanks to jacksonfoz for the patch. - Add replication command messages statistics to the Events reports. Thanks to Michael Paquier for the feature request. This is the last minor version of the 8.x series, next major version will include an upgrade of boostrap and jquery library which need some major rewrite. 2016-08-11 version 8.2 This is a maintenance release that fix some minor bug. There is also some performances improvement up to 20% on huge files and some new interesting features: * Multiprocessing can be used with pgbouncer log files. * pgBouncer and PostgreSQL log files can be used together in incremental mode. * With default or same prefix, stderr and syslog file can be parsed together, csvlog format can always be used. * Use a modal dialog window to download graphs as png images. * Add pl/pgSQL function information to queries when available. Here are the complete list of changes: - Fix report of database system messages. - Fix multi line statement concatenation after an error. - Fix box size for report of queries generating the most temporary files and the most waiting queries. - Rewrite code to better handle multi-line queries. - Fix garbage in examples of event queries with error only mode (option -w). Thanks to Thomas Reiss for the report. - Fix getting dataset related to query duration with the use of auto_explain. Thanks to tom__b for the patch. - Use a modal dialog window to download graphs as png images. - Huge rewrite of the incremental mechanism applied to log files to handle PostgreSQL and pgbouncer logs at the same time. - Multiprocess can be used with pgbouncer log. - Add code to remove remaining keyword placeholders tags. - Fix an other possible case of truncated date in LAST_PARSED file Thanks to brafaeloliveira for the report. - Set default scale 1 in pretty_print_number() js function. - Fix auto-detection of pgbouncer files that contain only stats lines. Thanks to Glyn Astill for the patch. - Add date to samples of queries generating most temporary files. - Do not display warning message of empty log when quiet mode is enable. - Fix reading from stdin by disabling pgbouncer format detection. Thanks to Robert Vargason for the patch. - Fix case of duplicate normalized error message with "nonstandard use of ...". - Fix storage of current temporary file related request. - Use the mnemonic rather than a signal number in kill calls. Thanks to Komeda Shinji for the patch. 2016-04-21 version 8.1 This is a maintenance release that fix a major issue introduced with support to pgbouncer that prevent parsing of compressed PostgreSQL log files and adds some improvements. Here are the complete list of changes: - Fix one case where pid file remain after dying. - Add requirement of log_error_verbosity = default to documentation. - Report message "LOG: using stale statistics instead of current ones because stats collector is not responding" in events view. - Remove obsolete days when we are in binary mode with --noreport - Fix wrong report of statements responsible of temporary files. Thanks to Luan Nicolini Marcondes for the report. This patch also exclude line with log level LOCATION to be parsed. - Fix limit on number of sample at report generation and remove pending LAST_PARSED.tmp file. - Update load_stat() function and global variables to support pgbouncer statistics. Update version to 2.0. - Handle more kind or query types. Thanks to julien Rouhaud for the patch. - Fix pgbouncer log parser to handle message: FATAL: the database system is shutting down - Fix whitespace placed in between the E and the quote character. Thanks to clijunky for the report. - Fix a major issue introduced with support to pgbouncer that prevent parsing of compressed PostgreSQL log files. Thanks to Levente Birta for the report. 2016-02-22 version 8.0 This is a major release that adds support to pgbouncer log files. New pgbouncer reports are: * Request Throughput * Bytes I/O Throughput * Queries Average duration * Simultaneous sessions * Histogram of sessions times * Sessions per database * Sessions per user * Sessions per host * Established connections * Connections per database * Connections per user * Connections per host * Most used reserved pools * Most Frequent Errors/Events pgbouncer log files can be parsed together with PostgreSQL logs. It also adds a two new command line options: * --pgbouncer-only to only show pgbouncer related reports. * --rebuild to be able to rebuild all html reports in incremental output directory where binary data files are still available. This release fixes a major bug introduced with journalctl code that was prevented the use of multiprocess feature. Here the complete list of other changes: - Fix progress bar with pgbouncer (only events are increased). - Sort %SYMBOLE hashtable for remove "!=" / "=" bug. Thanks to Nicolas Gollet for the patch. - Fix incorrect numbers on positional parameters in report Queries generating most temporary files. Thanks to Oskar Wiksten for the report. - Update operators list in SQL code beautifier with last update in pgFormatter. Thanks to Laurenz Albe for the report and the list of missing operators. - Cosmetic change to code and add some more debug information. 2016-01-18 version 7.3 This is a maintenance release to fix a major bug that was breaking the incremental mode in pgBadger. It also adds some more reports and features. * Add --timezone=+/-HH to control the timezone used in charts. The javascript library runs at client side so the timezone used is the browser timezone so the displayed time in the charts can be different from the time in the log file. * Add /tmp/pgbadger.pid file to prevent cron jobs overlaping on same log files. * Add command line option --pid-dir to be able to run two pgbadger at the same time by setting an alternate path to the pid file. * Report information about "LOG: skipping analyze of ..." into events reports. * Report message "LOG: sending cancel to blocking autovacuum" into events reports. Useful to look for queries generating autovacuum kill on account of a lock issue. Here the complete list of changes: - Automatically remove obsolete pid file when there is no other pgbadger process running (unix only) - Update documentation about the --timezone command line option. - Add --timezone=+/-HH to control the timezone used in charts. Thanks to CZAirwolf for the report. - Fix Histogram of session times when there is no data. - Fix unclosed test file. - Fix an other case where pgbadger.pid was not removed. - Always display slides part on connections report even if there is no data. - Fix some label on sessions reports - Add remove of pid file at normal ending. - Fix wrong size/offset of log files that was breaking incremental mode. Thanks a lot to CZAirwolf for the report and the help to find the problem. - Add command line option --pid-dir to be able to run two pgbadger at the same time by setting an alternate path to the directory where the pid file will be written. - Add /tmp/pgbadger.pid file to prevent cron jobs overlaping on same log files. - Report information about "LOG: skipping analyze of ..." into events reports. - Report message "LOG: sending cancel to blocking autovacuum" into events reports. Usefull to know which queries generate autovacuum kill on account of a lock issue. - Add more debug information about check log parsing decision. 2016-01-05 version 7.2 This new release fixes some issues especially in temporary files reports and adds some features. * Allow pgBadger to parse natively the journalctl command output * Add new keywords from PG 9.5 for code formating * Add support to %n log_line_prefix option for Unix epoch (PG 9.6) There's also some new command line option: * Adds --journalctl_cmd option to enable this functionality and set the command. Typically: --journalctl_cmd "journalctl -u postgresql-9.4" to parse output of PG 9.4 log Here is the full list of changes/fixes: - Fix missing detailed information (date, db, etc.) in Queries generating the largest temporary files report. - Fix label of sessions histogram. Thanks to Guillaume Lelarge for the patch. - Fix to handle cancelled query that generate more than one temporary file and more generally aggregate size on queries with multiple (> 1GB) temporary files. - Add "Total size" column in Temporary Files Activity table and fix size increment when a query have multiple 1GB temporary file. - Fix temporary file query normalization and examples. - Fix incomplete and wrong queries associated to temporary files when STATEMENT level log line was missing. Thanks to Mael Rimbault for the report. - When -w or --watch-mode is used, message "canceling statement due to statement timeout" s now reported with other errors. - Allow dot in dbname and user name. Thanks to David Turvey for the patch. - Remove use of unmaintained flotr2 javascript chart library and use of jqflot instead. - Fix bad formatting with anonymized values in queries. - Display 0ms instead of 0s when qery time is under the millisecond. Thanks to venkatabn for the report. - Normalize cursor names. Patch from Julien Rouhaud - Fix unregistered client host name with default pattern. Thanks to Eric S. Lucinger Ruiz for the report. - Remove redundant regular expressions. - Tweaking awkward phrasing, correcting subject-verb agreements, typos, and misspellings. Patch from Josh Kupershmid. - Fix potential incorrect creation of subdirectory in incremental mode. - Allow single white space after duration even if this should not appear. - Update copyright. 2015-07-11 version 7.1 This new release fixes some issues and adds a new report: * Distribution of sessions per application It also adds Json operators to SQL Beautifier. Here is the full list of changes/fixes: - Fix unwanted seek on old parsing position when log entry is stdin. Thanks to Olivier Schiavo for the report. - Try to fix a potential issue in log start/end date parsing. Thanks to gityerhubhere for the report. - Fix broken queries with multiline in bind parameters. Thank to Nicolas Thauvin for the report. - Add new report Sessions per application. Thanks to Keith Fiske for the feature request. - Add Json Operators to SQL Beautifier. Thanks to Tom Burnett and Hubert depesz Lubaczewski. - Makefile.PL: changed manpage section from '1' to '1p', fixes #237. Thanks to Cyril Bouthors for the patch. - Update Copyright date-range and installation instructions that was still refering to version 5. Thanks to Steve Crawford for the report. - Fix typo in changelog Note that new official releases must now be downloaded from GitHub and no more from SourceForge. Download at https://github.com/dalibo/pgbadger/releases 2015-05-08 version 7.0 This major release adds some more useful reports and features. * New report about events distribution per 5 minutes. * New per application details (total duration and times executed) for each query reported in Top Queries reports. The details are visible from a new button called "App(s) involved". * Add support to auto_explain extension. EXPLAIN plan will be added together with top slowest queries when available in log file. * Add a link to automatically open the explain plan on http://explain.depesz.com/ * New report on queries cumulated durations per user. * New report about the Number of cancelled queries (graph) * New report about Queries generating the most cancellation (N) * New report about Queries most cancelled. Here is the full list of changes/fixes: - Update documentation with last reports. - Fix number of event samples displayed in event reports. - Add new report about events distribution per x minutes. - Add app=%a default prefix to documentation. - Add reports of "App(s) involved" with top queries. Thanks to Antti Koivisto for the feature request. - Remove newline between a ) and , in the beautifier. - Add link to automatically open the explain plan on http://explain.depesz.com/ - Add support to auto_explain, EXPLAIN plan will be added together with top slowest queries when available in the log file. - Add a graph on distributed duration per user. Thanks to Korriliam for the patch. - Add tree new report: Number of cancelled queries (graph), Queries generating the most cancellation (N) and Queries most cancelled lists. Thanks to Thomas Reiss for the feature request. - Fix case where temporary file statement must be retrieved from the previous LOG statement and not in the following STATEMENT log entry. Thanks to Mael Rimbault for the report. - Add --enable-checksum to show a md5 hash of each reported queries. Thanks to Thomas Reiss for the feature request. 2015-04-13 version 6.4 This new release fixes a major bugs in SQL beautifier which removed operator and adds some useful improvement in anonymization of parameters values. pgBadger will also try to parse the full csvlog when a broken CSV line is encountered. - Make anonymization more useful. Thanks to Hubert depesz Lubaczewski for the patch. - Fix previous patch for csvlog generated with a PostgreSQL version before 9.0. - Try continue CSV parsing after broken CSV line. Thanks to Sergey Burladyan for the patch. - Fix bug in SQL beautifier which removed operator. Thanks to Thomas Reiss for the report. - Fix loop exit, check terminate quickly and correct comments indentation. Thanks to Sergey Burladyan for the patch Please upgrade. 2015-03-27 version 6.3 This new release fixes some bugs and adds some new reports: * A new per user details (total duration and times executed) for each query reported in Top Queries reports. The details are visible from a new button called "User(s) involved". * Add "Average queries per session" and "Average queries duration per session" in Sessions tab of the Global statistics. * Add connection time histogram. * Use bar graph for Histogram of query times and sessions times. There's also some cool new features and options: * Add -L | --logfile-list option to read a list of logfiles from an external file. * Add support to log_timezones with + and - signs for timestamp with milliseconds (%m). * Add --noreport option to instruct pgbadger to not build any HTML reports in incremental mode. pgBadger will only create binary files. * Add auto detection of client=%h or remote=%h from the log so that adding a prefix is not needed when it respect the default of pgbadger. * Redefine sessions duration histogram bound to be more accurate. * Add new option -M | --no-multiline to not collect multi-line statement and avoid storing and reporting garbage when needed. * Add --log-duration option to force pgbadger to associate log entries generated by both log_duration=on and log_statement=all. The pgbadger_tools script have also been improve with new features: * Add a new tool to pgbadger_tool to output top queries in CSV format for follow-up analysis. * Add --explain-time-consuming and --explain-normalized options to generate explain statement about top time consuming and top normalized slowest queries. Here is the full list of changes/fixes: - Update flotr2.min.js to latest github code. - Add per user detail information (total duration and times executed) for each query reported in "Time consuming queries", "Most frequent queries" "and Normalized slowest queries". The details are visible from a new button called "User(s) involved" near the "Examples" button. Thanks to Guillaume Le Bihan for the patch and tsn77130 for the feature request. - pgbadger_tool: add tool to output top queries to CSV format, for follow-up analysis. Thanks to briklen for the patch. - Add geometric operators to SQL beautifier. Thanks to Rodolphe Quiedeville for the report. - Fix non closing session when a process crash with message: "terminating connection because of crash of another server process". Thanks to Mael Rimbault for the report. - Add -L|--logfile-list command line option to read a list of logfiles from a file. Thanks to Hubert depesz Lubaczewski for the feature request. - Automatically remove %q from prefix. Thanks to mbecroft for report. - Do not store DEALLOCATE log entries anymore. - Fix queries histogram where range was not appears in the right order. Thanks to Grzegorz Garlewicz for the report. - Fix min yaxis in histogram graph. Thanks to grzeg1 for the patch. - Add --log-duration command line option to force pgbadger to associate log entries generated by both log_duration = on and log_statement=all. Thanks to grzeg1 for the feature request. - Small typographical corrections. Thanks to Jefferson Queiroz Venerando and Bill Mitchell the patches. - Reformat usage output and add explanation of the --noreport command line option. - Fix documentation about minimal pattern in custom log format. Thanks to Julien Rouhaud for the suggestion. - Add support to log_timezones with + and - signs to timestamp with milliseconds (%m). Thanks to jacksonfoz for the patch. pgbadger was not recognize log files with timezones like 'GMT+3'. - Add --noreport command line option to instruct pgbadger to not build any reports in incremental mode. pgBadger will only create binary files. Thanks to hubert Depesz Lubaczewski for the feature request. - Add time consuming information in tables of Queries per database... Thanks to Thomas for the feature request. - Add more details about the CSV parser error. It now prints the line number and the last parameter that generate the failure. This should allow to see the malformed log entry. - Change substitution markup in attempt to fix a new look-behind assertions error. Thanks to Paolo Cavallini for the report. - Use bar graph for Histogram of query times and sessions times. - Fix wrong count of min/max queries per second. Thanks to Guillaume Lelarge for the report. Add COPY statement to SELECT or INSERT statements statistics following the copy direction (stdin or stdout). - Fix Illegal division by zero at line 3832. Thanks to MarcoTrek for the report. - Add "Average queries per session" and "Average queries duration per session" in Sessions tab of the Global stat. Thanks to Guillaume Lelarge for the feature request. - Reformat numbers in pie graph tracker. Thanks to jirihlinka for the report. - pgbadger_tools: Add --explain-time-consuming and --explain-normalized to generate explain statement about top time consuming and top normalized slowest queries. Thanks to Josh Kupershmid fot the feature request. - Remove everything than error information from json output when -w | --watch-mode is enable. Thanks to jason. - Fix undefined subroutine encode_json when using -x json. Thanks to jason for the report. - Add auto detection of client=%h or remote=%h from the log so that adding a prefix is not needed when it respect the default of pgbadger. - Redefine sessions duration histogram bound to be more accurate. Thanks to Guillaume Lelarge for the report. - Add connection time histogram. Thanks to Guillaume Lelarge for the feature request. - Add new option -M | --no-multiline to not collect multi-line statement to avoid garbage especially on errors that generate a huge report. - Do not return SUCCESS error code 0 when aborted or something fails. Thanks to Bruno Almeida for the patch. 2014-10-07 version 6.2 This is a maintenance release to fix a regression in SQL traffic graphs and fix some other minor issues. The release also add a new option -D or --dns-resolv to map client ip addresses to FQDN without having log_hostname enabled on the postgresql's configuration - Do not display queries in Slowest individual, Time consuming and Normalized slowest queries reports when there is no duration in log file. Display NO DATASET instead. - Fix min/max queries in SQL traffic that was based on duration instead of query count. - Fix wrong unit to Synced files in Checkpoints files report. Thanks to Levente Birta for the report. - Enable allow_loose_quotes in Text::CSV_XS call to fix CSV parsing error when fields have quote inside an unquoted field. Thanks to Josh Berkus for the report. - Add -D | --dns-resolv command line option to replace ip addresses by their DNS name. Be warned that this can slow down pgBagder a lot. Thanks to Jiri Hlinka for the feature request. 2014-09-25 version 6.1 This release fix some issues and adds some new features. It adds a new option -B or --bar-graph to use bar instead of line in graphs. It will also keep tick formatting when zooming. The release also add a new program: pgbadger_tools to demonstrate how to works with pgBadger binary files to build your own new feature. The first tools 'explain-slowest' allow printing of top slowest queries as EXPLAIN statements. There's also additional options to execute automatically the statements with EXPLAIN ANALYZE and get the execution plan. See help of the program for more information or the README file in the tools directory. Some modifications will change certain behavior: - The -T | --title text value will now be displayed instead of the pgBadger label right after the logo. It was previously displayed on mouse over the pgBadger label. Here is the full list of changes/fixes: - Change -T | --title position on pgBadger report. Title now override the pgBadger label. Thanks to Julien Rouhauld for the patch. - Add --file-per-query and --format-query option to write each slowest query in separate file named qryXXX.sql and perform minimal formating of the queries. Thanks to Rodolphe Quiedeville for the patch. - Remove debug query from explain-slowest tool. - Fix surge in sessions number report when an exclusion or inclusion option (dbname, user, appname, etc.) is used. Thanks to suyah for the report. - Fix fatal error when remote log file is 0 size. Thanks to Julien Rouhaud for the report. - Allow pgbadger_tools --explain-slowest to automatically execute the EXPLAIN statements an report the plan. See pgbadger_tools --help for more explanation. - Add --analyze option to replace EXPLAIN statements by EXPLAIN (ANALYZE, VERBOSE, BUFFERS). - Move pgbadger_tools program and README.tools into the tools/ subdirectory with removing the extension. Add more comments and explanations. - Fix case where die with interrupt signal is received when using -e option. Thanks to Lloyd Albin for the report. - Add a new program pgbadger_tools to demonstrate how to deal with pgBadger binary files to build your own new feature. The first one 'explain-slowest' allow printing of top slowest queries as EXPLAIN statements. - Keep tick formatting when zooming. Thanks to Julien Rouhaud for the patch. - Fix automatic detection of rsyslogd logs. Thanks to David Day for the report. - Fix issue in calculating min/max/avg in "General Activity" report. It was build on the sum of queries duration per minutes instead of each duration. Thanks to Jayadevan M for the report. - The same issue remains with percentile that are build using the sum of duration per minutes and doesn't represent the real queries duration. - This commit also include a modification in convert_time() method to reports milliseconds. - Add -B or --bar-graph command line option to use bar instead of line in graph. Thanks to Bart Dopheide for the suggestion. - Fix Checkpoint Wal files usage graph title. 2014-08-08 version 6.0 This new major release adds some new features like automatic cleanup of binary files in incremental mode or maximum number of weeks for reports retention. It improve the incremental mode with allowing the use of multiprocessing with multiple log file. It also adds report of query latency percentile on the general activity table (percentiles are 90, 95, 99). There's also a new output format: JSON. This format is good for sharing data with other languages, which makes it easy to integrate pgBadger's result into other monitoring tools. You may want to expose your reports but not the data, using the --anonymize option pgBadger will be able to anonymize all literal values in the queries. Sometime select to copy a query from the report could be a pain. There's now a click-to-select button in front of each query that allow you to just use Ctrl+C to copy it on clipboard The use of the new -X option also allow pgBadger to write out extra files to the outdir when creating incremental reports. Those files are the CSS and Javascript code normally repeated in each HTLM files. Warning: the behavior of pgBadger in incremental mode has changed. It will now always cleanup the output directory of all the obsolete binary file. If you were using those files to build your own reports, you can prevent pgBadger to remove them by using the --noclean option. Note that if you use the retention feature, all those files in obsolete directories will be removed too. Here is the complete list of changes. - Javascript improvement to use only one call of sql_select and sql_format. Use jQuery selector instead of getElementById to avoid js errors when not found. Thanks to Julien Rouhaud for the patches. - Add -R | --retention command line option to set the maximum number of week reports to preserve in the output directory for incremental mode. Thanks to Kong Man for the feature request. - Session count is immediately decreased when a FATAL error is received in the current session to prevent overcount of simultaneous session number. Thanks to Josh Berkus for the report. - Fix issue in incremental mode when parsing is stopped after rotating log and rotated log has new lines. The new file was not parsed at all. Thanks to CZAirwolf for the report. - Fix revert to single thread when last_line_parsed exists. Thanks to Bruno Almeida for the report. - Fix issue in handling SIGTERM/SIGINT that cause pgbadger to continue. - Add autoclean feature to pgbadger in incremental mode. pgbadger will now removed automatically obsolete binary files unless you specify --noclean at command line. - Add new command line option --anonymize to obscure all literals in queries/errors to hide confidential data. Thanks to wmorancfi for the feature request. - Fix single "SELECT;" as a query in a report. Thanks to Marc Cousin for the report. - Add a copy icon in front of each query in the report to select the entire query. Thanks to Josh Berkus for the feature request. - Fix wrong move to beginning of a file if the file was modified after have been parsed a time. Thanks to Herve Werner for the report. - Allow pgBadger to write out extra files to outdir when creating incremental reports. Require the use of the -X or --extra-files option in incremental mode. Thanks to Matthew Musgrove for the feature request. - Fix incomplete handling of XZ compressed format. - Fix move to offset in incremental mode with multiprocess and incomplete condition when file is smaller than the last offset. Thanks to Herve Werner for the report. - Allow/improve incremental mode with multiple log file and multiprocess. - Fix incorrect location of temporary file storing last parsed line in multiprocess+incremental mode. Thanks to Herve Werner for the report. - Fix remote ssh command error sh: 2: Syntax error: "|" unexpected. Thanks to Herve Werner for the report. - Fix missing database name in samples of top queries reports. Thanks to Thomas Reiss for the report. - Add minimal documentation about JSON output format. - Add execute attribute to pgbadger in the source repository, some may find this more helpful when pgbadger is not installed and executed directly from this repository. - Fix issue with csv log format and incremental mode. Thanks to Suya for the report and the help to solve the issue. There also a fix to support autovacuum statistic with csv format. - Fix bad URL to documentation. Thanks to Rodolphe Quiedeville for the report. - Two minor change to made easier to use Tsung scenario: Remove the first empty line and replace probability by weight. Now it is possible to use the scenario as is with Tsung 1.5. - Fix incremental mode where weeks on index page start on sunday and week reports start on monday. Thanks to flopma and birkosan for the report. - Replace label "More CPU costly" by "Highest CPU-cost". Thanks to Marc Cousin for the suggestion. - Add query latency percentile to General Activity table (percentiles are 90, 95, 99). Thanks to Himanchali for the patch. - Fix typon pgbadger call. Thanks to Guilhem Rambal for the report. - Add JSON support for output format. JSON format is good for sharing data with other languages, which makes it easy to integrate pgBadger's result into other monitoring tools like Cacti or Graphite. Thanks to Shanzhang Lan for the patch. - Update documentation about remote mode feature. - Update documentation to inform that the xz utility should be at least in version 5.05 to support the --robot command line option. Thanks to Xavier Millies-Lacroix for the report. - Fix remote logfile parsing. Thanks to Herve Werner for the report. 2014-05-05 version 5.1-1 - Fix parsing of remote log file, forgot to apply some patches. Thank to Herve Werner for the report. 2014-05-04 version 5.1 This new release fixes several issues and adds several new features like: * Support to named PREPARE and EXECUTE queries. They are replaced by the real prepare statement and reported into top queries. * Add new --exclude-line command line option for excluding immediately log entries matching any regex. * Included remote and client information into the most frequent events. * pgBadger is now able to parse remote logfiles using a password less ssh connection and generate locally the reports. * Histogram granularity can be adjusted using the -A command line option. * Add new detail information on top queries to show when the query is a bind query. * Support to logfile compressed using the xz compression format. * Change week/day menu in incremental index, it is now represented as usual with a calendar view per month. * Fix various compatibility issue with Windows and Perl 5.8 Here is the full list of changes: - fixed calendar display and correct typo. Thanks to brunomgalmeida for the patch. - revert to single thread if file is small. Thanks to brunomgalmeida for the patch. - print calendars 4+4+4 instead of 3+4+4+1 when looking at full year. Thanks to brunomgalmeida for the patch. - Add --exclude-line option for excluding log entries with a regex based on the full log line. Thanks to ferfebles for the feature request. - Fix SQL keywords that was beautified twice. - Remove duplicate pg_keyword in SQL beautifier. - Fix increment of session when --disable-session is activated. - Fix missing unit in Checkpoints Activity report when time value is empty. Thanks to Herve Werner for the report. - Fix double information in histogram data when period is the hour. - Add support to named PREPARE and EXECUTE queries. Calls to EXECUTE statements are now replaced by the prepared query and show samples with parameters. Thanks to Brian DeRocher for the feature request. - Included Remote and Client information into the most frequent events examples. Thanks to brunomgalmeida for the patch. - Fix documentation about various awkward phrasings, grammar, and spelling. Consistently capitalize "pgBadger" as such, except for command examples which should stay all-lowercase. Thanks to Josh Kupershmidt for the patch. - Fix incremental mode on Windows by replacing %F and %u POSIX::strftime format to %Y-%m-%d and %w. Thanks to dthiery for the report. - Remove Examples button when there is no examples available. - Fix label on tips in histogram of errors reports. - Fix error details in incremental mode in Most Frequent Errors/Events report. Thanks to Herve Werner for the report. - Fix Sync time value in Checkpoints buffers report. Thanks to Herve Werner for the report. - Fix wrong connections per host count. Thanks to Herve Werner for the report. - Allow pgBadger to parse remote log file using a password less ssh connection. Thanks to Orange OLPS department for the feature request. - Histogram granularity can be adjusted using the -A command line option. By default they will report the mean of each top queries or errors occurring per hour. You can now specify the granularity down to the minute. Thanks to Orange OLPS department for the feature request. - Add new detail information on top queries to show when the query is a bind query. Thanks to Orange OLPS department for the feature request. - Fix queries that exceed the size of the container. - Add unit (seconds) to checkpoint write/sync time in the checkpoints activity report. Thanks to Orange OLPS department for the report. - Fix missing -J option in usage. - Fix incomplete lines in split logfile to rewind to the beginning of the line. Thanks to brunomgalmeida for the patch. - Fix tsung output and add tsung xml header sample to output file. - Make it possible to do --sample 0 (prior it was falling back to the default of 3). Thanks to William Moran for the patch. - Fix xz command to be script readable and always have size in bytes: xz --robot -l %f | grep totals | awk "{print $5}" - Add support to logfile compressed by the xz compression format. Thanks to Adrien Nayrat for the patch. - Do not increment queries duration histogram when prepare|parse|bind log are found, but only with execute log. Thanks to Josh Berkus for the report. - Fix normalization of error message about unique violation when creating intermediate dirs. Thanks to Tim Sampson for the report. - Allow use of Perl metacharacters like [..] in application name. Thanks to Magnus Persson for the report. - Fix dataset tip to be displayed above image control button. Thanks to Ronan Dunklau for the fix. - Renamed the Reset bouton to "To Chart" to avoid confusion with unzoom feature. - Fix writing of empty incremental last parsed file. - Fix several other graphs - Fix additional message at end of query or error when it was logged from application output. Thanks to Herve Werner for the report. - Fix checkpoint and vacuum graphs when all dataset does not have all values. Thanks to Herve Werner for the report. - Fix week numbered -1 in calendar view. - Change week/day menu in incremental index, it is now represented as usual with a calendar view per month. Thanks to Thom Brown for the feature request. - Load FileHandle to fix error: Can not locate object method "seek" via package "IO::Handle" with perl 5.8. Thanks to hkrizek for the report. - Fix count of queries in progress bar when there is compressed file and multiprocess is enabled. Thanks to Johnny Tan for the report. - Fix debug message "Start parsing at offset" - Add ordering in queries times histogram. Thanks to Ulf Renman for the report. - Fix various typos. Thanks to Thom Brown for the patch. - Fix Makefile error, "WriteMakefile: Need even number of args at Makefile.PL" with Perl 5.8. Thanks to Fangr Zhang for the report. - Fix some typo in Changelog 2014-02-05 version 5.0 This new major release adds some new features like incremental mode and SQL queries times histogram. There is also a hourly graphic representation of the count and average duration of top normalized queries. Same for errors or events, you will be able to see graphically at which hours they are occurring the most often. The incremental mode is an old request issued at PgCon Ottawa 2012 that concern the ability to construct incremental reports with successive runs of pgBadger. It is now possible to run pgbadger each days or even more, each hours, and have cumulative reports per day and per week. A top index page allow you to go directly to the weekly and daily reports. This mode have been build with simplicity in mind so running pgbadger by cron as follow: 0 23 * * * pgbadger -q -I -O /var/www/pgbadger/ /var/log/postgresql.log is enough to have daily and weekly reports viewable using your browser. You can take a look at a sample report at http://dalibo.github.io/pgbadger/demov5/index.html There's also a useful improvement to allow pgBadger to seek directly to the last position in the same log file after a successive execution. This feature is only available using the incremental mode or the -l option and parsing a single log file. Let's say you have a weekly rotated log file and want to run pgBadger each days. With 2GB of log per day, pgbadger was spending 5 minutes per block of 2 GB to reach the last position in the log, so at the end of the week this feature will save you 35 minutes. Now pgBadger will start parsing new log entries immediately. This feature is compatible with the multiprocess mode using -j option (n processes for one log file). Histogram of query times is a new report in top queries slide that shows the query times distribution during the analyzed period. For example: Range Count Percentage -------------------------------------------- 0-1ms 10,367,313 53.52% 1-5ms 799,883 4.13% 5-10ms 451,646 2.33% 10-25ms 2,965,883 15.31% 25-50ms 4,510,258 23.28% 50-100ms 180,975 0.93% 100-500ms 87,613 0.45% 500-1000ms 5,856 0.03% 1000-10000ms 2,697 0.01% > 10000ms 74 0.00% There is also some graphic and report improvements, like the mouse tracker formatting that have been reviewed. It now shows a vertical crosshair and all dataset values at a time when mouse pointer moves over series. Automatic queries formatting has also been changed, it is now done on double click event as simple click was painful when you want to copy some part of the queries. The report "Simultaneous Connections" has been relabeled into "Established Connections", it is less confusing as many people think that this is the number of simultaneous sessions, which is not the case. It only count the number of connections established at same time. Autovacuum reports now associate database name to the autovacuum and autoanalyze entries. Statistics now refer to "dbname.schema.table", previous versions was only showing the pair "schema.table". This release also adds Session peak information and a report about Simultaneous sessions. Parameters log_connections and log_disconnections must be enabled in postgresql.conf. Complete ChangeLog: - Fix size of SQL queries columns to prevent exceeding screen width. - Add new histogram reports on top normalized queries and top errors or event. It shows at what hours and in which quantity the queries or errors appears. - Add seeking to last parser position in log file in incremental mode. This prevent parsing all the file to find the last line parse from previous run. This only works when parsing a single flat file, -j option is permitted. Thanks to ioguix for the kick. - Rewrite reloading of last log time from binary files. - Fix missing statistics of last parsed queries in incremental mode. - Fix bug in incremental mode that prevent reindexing a previous day. Thanks to Martin Prochazka for the great help. - Fix missing label "Avg duration" on column header in details of Most frequent queries (N). - Add vertical crosshair on graph. - Fix case where queries and events was not updated when using -b and -e command line. Thanks to Nicolas Thauvin for the report. - Fix week sorting on incremental report main index page. Thanks to Martin Prochazka for the report. - Add "Histogram of query times" report to show statistics like 0-100ms : 80%, 100-500ms :14%, 500-1000ms : 3%, > 1000ms : 1%. Thanks to tmihail for the feature request. - Format mouse tracker on graphs to show all dataset value at a time. - Add control of -o vs -O option with incremental mode to prevent wrong use. - Change log level of missing LAST_PARSED.tmp file to WARNING and add a HINT. - Update copyright date to 2014 - Fix empty reports of connections. Thanks to Reeshna Ramakrishnan for the report. - Fix display of connections peak when no connection was reported. - Fix warning on META_MERGE for ExtUtils::MakeMaker < 6.46. Thanks to Julien Rouhaud for the patch. - Add documentation about automatic incremental mode. - Add incremental mode to pgBadger. This mode will build a report per day and a cumulative report per week. It also create an index interface to easiest access to the different report. Must be run, for example, as: pgbadger /var/log/postgresql.log.1 -I -O /var/www/pgbadger/ after a daily PostgreSQL log file rotation. - Add -O | --outdir path to specify the directory where out file must be saved. - Automatic queries formatting is now done on double click event, simple click was painful when you want to copy some part of the queries. Thanks to Guillaume Smet for the feature request. - Remove calls of binmode to force html file output to be utf8 as there is some bad side effect. Thanks to akorotkov for the report. - Remove use of Time::HiRes Perl module as some distributions does not include this module by default in core Perl install. - Fix "Wide character in print" Perl message by setting binmode to :utf8. Thanks to Casey Allen Shobe for the report. - Fix application name search regex to handle application name with space like "pgAdmin III - Query Tool". - Fix wrong timestamps saved with top queries. Thanks to Herve Werner for the report. - Fix missing logs types statitics when using binary mode. Thanks to Herve Werner for the report. - Fix Queries by application table column header: Database replaced by Application. Thanks to Herve Werner for the report. - Add "Max number of times the same event was reported" report in Global stats Events tab. - Replace "Number of errors" by "Number of ERROR entries" and add "Number of FATAL entries". - Replace "Number of errors" by "Number of events" and "Total errors found" by "Total events found" in Events reports. Thanks to Herve Werner for the report. - Fix title error in Sessions per database. - Fix clicking on the info link to not go back to the top of the page. Thanks to Guillaume Smet for the report and solution. - Fix incremental report from binary output where binary data was not loaded if no queries were present in log file. Thanks to Herve Werner for the report. - Fix parsing issue when log_error_verbosity = verbose. Thanks to vorko for the report. - Add Session peak information and a report about Simultaneous sessions. log_connections+log_disconnections must be enabled in postgresql.conf. - Fix wrong requests number in Queries by user and by host. Thanks to Jehan-Guillaume de Rorthais for the report. - Fix issue with rsyslog format failing to parse logs. Thanks to Tim Sampson for the report. - Associate autovacuum and autoanalyze log entry to the corresponding database name. Thanks to Herve Werner for the feature request. - Change "Simultaneous Connections" label into "Established Connections", it is less confusing as many people think that this is the number of simultaneous sessions, which is not the case. It only count the number of connections established at same time. Thanks to Ronan Dunklau for the report. 2013-11-08 version 4.1 This release fixes two major bugs and some others minor issues. There's also a new command line option --exclude-appname that allow exclusion from the report of queries generated by a specific program, like pg_dump. Documentation have been updated with a new chapter about building incremental reports. - Add log_autovacuum_min_duration into documentation in chapter about postgresql configuration directives. Thanks to Herve Werner for the report. - Add chapter about "Incremental reports" into documentation. - Fix reports with per minutes average where last time fraction was not reported. Thanks to Ludovic Levesque and Vincent Laborie for the report. - Fix unterminated comment in information popup. Thanks to Ronan Dunklau for the patch. - Add --exclude-appname command line option to eliminate unwanted traffic generated by a specific application. Thanks to Steve Crawford for the feature request. - Allow external links use into URL to go to a specific report. Thanks to Hubert depesz Lubaczewski for the feature request. - Fix empty reports when parsing compressed files with the -j option which is not allowed with compressed file. Thanks to Vincent Laborie for the report. - Prevent progress bar length to increase after 100% when real size is greater than estimated size (issue found with huge compressed file). - Correct some spelling and grammar in ChangeLog and pgbadger. Thanks to Thom Brown for the patch. - Fix major bug on SQL traffic reports with wrong min value and bad average value on select reports, add min/max for select queries. Thanks to Vincent Laborie for the report. 2013-10-31 - Version 4.0 This major release is the "Say goodbye to the fouine" release. With a full rewrite of the reports design, pgBadger has now turned the HTML reports into a more intuitive user experience and professional look. The report is now driven by a dynamic menu with the help of the embedded Bootstrap library. Every main menu corresponds to a hidden slide that is revealed when the menu or one of its submenus is activated. There's also the embedded font Font Awesome webfont to beautify the report. Every statistics report now includes a key value section that immediately shows you some of the relevant information. Pie charts have also been separated from their data tables using two tabs, one for the chart and the other one for the data. Tables reporting hourly statistics have been moved to a multiple tabs report following the data. This is used with General (queries, connections, sessions), Checkpoints (buffer, files, warnings), Temporary files and Vacuums activities. There's some new useful information shown in the key value sections. Peak information shows the number and datetime of the highest activity. Here is the list of those reports: - Queries peak - Read queries peak - Write queries peak - Connections peak - Checkpoints peak - WAL files usage Peak - Checkpoints warnings peak - Temporary file size peak - Temporary file number peak Reports about Checkpoints and Restartpoints have been merged into a single report. These are almost one in the same event, except that restartpoints occur on a slave cluster, so there was no need to distinguish between the two. Recent PostgreSQL versions add additional information about checkpoints, the number of synced files, the longest sync and the average of sync time per file. pgBadger collects and shows this information in the Checkpoint Activity report. There's also some new reports: - Prepared queries ratio (execute vs prepare) - Prepared over normal queries - Queries (select, insert, update, delete) per user/host/application - Pie charts for tables with the most tuples and pages removed during vacuum. The vacuum report will now highlight the costly tables during a vacuum or analyze of a database. The errors are now highlighted by a different color following the level. A LOG level will be green, HINT will be yellow, WARNING orange, ERROR red and FATAL dark red. Some changes in the binary format are not backward compatible and the option --client has been removed as it has been superseded by --dbclient for a long time now. If you are running a pg_dump or some batch process with very slow queries, your report analysis will be hindered by those queries having unwanted prominence in the report. Before this release it was a pain to exclude those queries from the report. Now you can use the --exclude-time command line option to exclude all traces matching the given time regexp from the report. For example, let's say you have a pg_dump at 13:00 each day during half an hour, you can use pgbadger as follows: pgbadger --exclude-time "2013-09-.* 13:.*" postgresql.log If you are also running a pg_dump at night, let's say 22:00, you can write it as follows: pgbadger --exclude-time '2013-09-\d+ 13:[0-3]' --exclude-time '2013-09-\d+ 22:[0-3]' postgresql.log or more shortly: pgbadger --exclude-time '2013-09-\d+ (13|22):[0-3]' postgresql.log Exclude time always requires the iso notation yyyy-mm-dd hh:mm:ss, even if log format is syslog. This is the same for all time-related options. Use this option with care as it has a high cost on the parser performance. 2013-09-17 - version 3.6 Still an other version in 3.x branch to fix two major bugs in vacuum and checkpoint graphs. Some other minors bugs has also been fixed. - Fix grammar in --quiet usage. Thanks to stephen-a-ingram for the report. - Fix reporting period to starts after the last --last-parsed value instead of the first log line. Thanks to Keith Fiske for the report. - Add --csv-separator command line usage to documentation. - Fix CSV log parser and add --csv-separator command line option to allow change of the default csv field separator, coma, in any other character. - Avoid "negative look behind not implemented" errors on perl 5.16/5.18. Thanks to Marco Baringer for the patch. - Support timestamps for begin/end with fractional seconds (so it'll handle postgresql's normal string representation of timestamps). - When using negative look behind set sub-regexp to -i (not case insensitive) to avoid issues where some upper case letter sequence, like SS or ST. - Change shebang from /usr/bin/perl to /usr/bin/env perl so that user-local (perlbrew) perls will get used. - Fix empty graph of autovacuum and autoanalyze. - Fix checkpoint graphs that was not displayed any more. 2013-07-11 - Version 3.5 Last release of the 3.x branch, this is a bug fix release that also adds some pretty print of Y axis number on graphs and a new graph that groups queries duration series that was shown as second Y axis on graphs, as well as a new graph with number of temporary file that was also used as second Y axis. - Split temporary files report into two graphs (files size and number of file) to no more used a second Y axis with flotr2 - mouse tracker is not working as expected. - Duration series representing the second Y axis in queries graph have been removed and are now drawn in a new "Average queries duration" independant graph. - Add pretty print of numbers in Y axis and mouse tracker output with PB, TB, GB, KB, B units, and seconds, microseconds. Number without unit are shown with P, T, M, K suffix for easiest very long number reading. - Remove Query type reports when log only contains duration. - Fix display of checkpoint hourly report with no entry. - Fix count in Query type report. - Fix minimal statistics output when nothing was load from log file. Thanks to Herve Werner for the report. - Fix several bug in log line parser. Thanks to Den Untevskiy for the report. - Fix bug in last parsed storage when log files was not provided in the right order. Thanks to Herve Werner for the report. - Fix orphan lines wrongly associated to previous queries instead of temporary file and lock logged statement. Thanks to Den Untevskiy for the report. - Fix number of different samples shown in events report. - Escape HTML tags on error messages examples. Thanks to Mael Rimbault for the report. - Remove some temporary debug informations used with some LOG messages reported as events. - Fix several issues with restartpoint and temporary files reports. Thanks to Guillaume Lelarge for the report. - Fix issue when an absolute path was given to the incremental file. Thanks to Herve Werner for the report. - Remove creation of incremental temp file $tmp_last_parsed when not running in multiprocess mode. Thanks to Herve Werner for the report. 2013-06-18 - Version 3.4 This release adds lot of graphic improvements and a better rendering with logs over few hours. There's also some bug fixes especially on report of queries that generate the most temporary files. - Update flotr2.min.js to latest github code. - Add mouse tracking over y2axis. - Add label/legend information to ticks displayed on mouseover graphs. - Fix documentation about log_statement and log_min_duration_statement. Thanks to Herve Werner for the report. - Fix missing top queries for locks and temporary files in multiprocess mode. - Cleanup code to remove storage of unused information about connection. - Divide the huge dump_as_html() method with one method per each report. - Checkpoints, restart points and temporary files are now drawn using a period of 5 minutes per default instead of one hour. Thanks to Josh Berkus for the feature request. - Change fixed increment of one hour to five minutes on queries graphs "SELECT queries" and "Write queries". Remove graph "All queries" as, with a five minutes increment, it duplicates the "Queries per second". Thanks to Josh Berkus for the feature request. - Fix typos. Thanks to Arsen Stasic for the patch. - Add default HTML charset to utf-8 and a command line option --charset to be able to change the default. Thanks to thomas hankeuhh for the feature request. - Fix missing temporary files query reports in some conditions. Thanks to Guillaume Lelarge and Thomas Reiss for the report. - Fix some parsing issue with log generated by pg 7.4. - Update documentation about missing new reports introduced in previous version 3.3. Note that it should be the last release of the 3.x branch unless there's major bug fixes, but next one will be a major release with a completely new design. 2013-05-01 - Version 3.3 This release adds four more useful reports about queries that generate locks and temporary files. An other new report about restart point on slaves and several bugs fix or cosmetic change. Support to parallel processing under Windows OS has been removed. - Remove parallel processing under Windows platform, the use of waitpid is freezing pgbadger. Thanks to Saurabh Agrawal for the report. I'm not comfortable with that OS this is why support have been removed, if someone know how to fix that, please submit a patch. - Fix Error in tempfile() under Windows. Thanks to Saurabh Agrawal for the report. - Fix wrong queries storage with lock and temporary file reports. Thanks to Thomas Reiss for the report. - Add samples queries to "Most frequent waiting queries" and "Queries generating the most temporary files" report. - Add two more reports about locks: 'Most frequent waiting queries (N)", and "Queries that waited the most". Thanks to Thomas Reiss for the patch. - Add two reports about temporary files: "Queries generating the most temporary files (N)" and "Queries generating the largest temporary files". Thanks to Thomas Reiss for the patch. - Cosmetic change to the Min/Max/Avg duration columns. - Fix report of samples error with csvlog format. Thanks to tpoindessous for the report. - Add --disable-autovacuum to the documentation. Thanks to tpoindessous for the report. - Fix unmatched ) in regex when using %s in prefix. - Fix bad average size of temporary file in Overall statistics report. Thanks to Jehan Guillaume de Rorthais for the report. - Add restartpoint reporting. Thanks to Guillaume Lelarge for the patch. - Made some minor change in CSS. - Replace %% in log line prefix internally by a single % so that it could be exactly the same than in log_line_prefix. Thanks to Cal Heldenbrand for the report. - Fix perl documentation header, thanks to Cyril Bouthors for the patch. 2013-04-07 - Version 3.2 This is mostly a bug fix release, it also adds escaping of HTML code inside queries and the adds Min/Max reports with Average duration in all queries reports. - In multiprocess mode, fix case where pgbadger does not update the last-parsed file and do not take care of the previous run. Thanks to Kong Man for the report. - Fix case where pgbadger does not update the last-parsed file. Thanks to Kong Man for the report. - Add CDATA to make validator happy. Thanks to Euler Taveira de Oliveira for the patch. - Some code review by Euler Taveira de Oliveira, thanks for the patch. - Fix case where stat were multiplied by N when -J was set to N. Thanks to thegnorf for the report. - Add a line in documentation about log_statement that disable log_min_duration_statement when it is set to all. - Add quick note on how to contribute, thanks to Damien Clochard for the patch. - Fix issue with logs read from stdin. Thanks to hubert depesz lubaczewski for the report. - Force pgbadger to not try to beautify queries bigger than 10kb, this will take too much time. This value can be reduce in the future if hang with long queries still happen. Thanks to John Rouillard for the report. - Fix an other issue in replacing bind param when the bind value is alone on a single line. Thanks to Kjeld Peters for the report. - Fix parsing of compressed files together with uncompressed files using the the -j option. Uncompressed files are now processed using split method and compressed ones are parsed per one dedicated process. - Replace zcat by gunzip -c to fix an issue on MacOsx. Thanks to Kjeld Peters for the report. - Escape HTML code inside queries. Thanks to denstark for the report. - Add Min/Max in addition to Average duration values in queries reports. Thanks to John Rouillard fot the feature request. - Fix top slowest array size with binary format. - Fix an other case with bind parameters with value in next line and the top N slowest queries that was repeated until N even if the real number of queries was lower. Thanks to Kjeld Peters for the reports. - Fix non replacement of bind parameters where there is line breaks in the parameters, aka multiline bind parameters. Thanks to Kjeld Peters for the report. - Fix error with seekable export tag with Perl v5.8. Thanks to Jeff Bohmer for the report. - Fix parsing of non standard syslog lines begining with a timestamp like "2013-02-28T10:35:11-05:00". Thanks to Ryan P. Kelly for the report. - Fix issue #65 where using -c | --dbclient with csvlog was broken. Thanks to Jaime Casanova for the report. - Fix empty report in watchlog mode (-w option). 2013-02-21 - Version 3.1 This is a quick release to fix missing reports of most frequent errors and slowest normalized queries in previous version published yesterday. - Fix empty report in watchlog mode (-w option). - Force immediat die on command line options error. - Fix missing report of most frequent events/errors report. Thanks to Vincent Laborie for the report. - Fix missing report of slowest normalized queries. Thanks to Vincent Laborie for the report. - Fix display of last print of progress bar when quiet mode is enabled. 2013-02-20 - Version 3.0 This new major release adds parallel log processing by using as many cores as wanted to parse log files, the performances gain is directly related to the number of cores specified. There's also new reports about autovacuum/autoanalyze informations and many bugs have been fixed. - Update documentation about log_duration, log_min_duration_statement and log_statement. - Rewrite dirty code around log timestamp comparison to find timestamp of the specified begin or ending date. - Remove distinction between logs with duration enabled from variables log_min_duration_statement and log_duration. Commands line options --enable-log_duration and --enable-log_min_duration have been removed. - Update documentation about parallel processing. - Remove usage of Storable::file_magic to autodetect binary format file, it is not include in core perl 5.8. Thanks to Marc Cousin for the report. - Force multiprocess per file when files are compressed. Thanks to Julien Rouhaud for the report. - Add progress bar logger for multiprocess by forking a dedicated process and using pipe. Also fix some bugs in using binary format that duplicate query/error samples per process. - chmod 755 pgbadger - Fix checkpoint reports when there is no checkpoint warnings. - Fix non report of hourly connections/checkpoint/autovacuum when not query is found in log file. Thanks to Guillaume Lelarge for the report. - Add better handling of signals in multiprocess mode. - Add -J|--job_per_file command line option to force pgbadger to use one process per file instead of using all to parse one file. Useful to have better performances with lot of small log file. - Fix parsing of orphan lines with stderr logs and log_line_prefix without session information into the prefix (%l). - Update documentation about -j | --jobs option. - Allow pgbadger to use several cores, aka multiprocessing. Add options -j | --jobs option to specify the number of core to use. - Add autovacuum and autoanalyze infos to binary format. - Fix case in SQL code highlighting where QQCODE temp keyword was not replaced. Thanks to Julien Ruhaud for the report. - Fix CSS to draw autovacuum graph and change legend opacity. - Add pie graph to show repartition of number of autovacuum per table and number of tuples removed by autovacuum per table. - Add debug information about selected type of log duration format. - Add report of tuples/pages removed in report of Vacuums by table. - Fix major bug on syslog parser where years part of the date was wrongly extracted from current date with logs generated in 2012. - Fix issue with Perl 5.16 that do not allow "ss" inside look-behind assertions. Thanks to Cedric for the report. - New vacuum and analyze hourly reports and graphs. Thanks to Guillaume Lelarge for the patch. UPGRADE: if you are running pgbadger by cron take care if you were using one of the following option: --enable-log_min_duration and --enable-log_duration, they have been removed and pgbadger will refuse to start. 2013-01-17 - Version 2.3 This release fixes several major issues especially with csvlog and a memory leak with log parsing using a start date. There's also several improvement like new reports of number of queries by database and application. Mouse over reported queries will show database, user, remote client and application name where they are executed. A new binary input/output format have been introduced to allow saving or reading precomputed statistics. This will allow incremental reports based on periodical runs of pgbader. This is a work in progress fully available with next coming major release. Several SQL code beautifier improvement from pgFormatter have also been merged. - Clarify misleading statement about log_duration: log_duration may be turned on depending on desired information. Only log_statement must not be on. Thanks to Matt Romaine for the patch. - Fix --dbname and --dbuser not working with csvlog format. Thanks to Luke Cyca for the report. - Fix issue in SQL formatting that prevent left back indentation when major keywords were found. Thanks to Kevin Brannen for the report. - Display 3 decimals in time report so that ms can be seen. Thanks to Adam Schroder for the request. - Force the parser to not insert a new line after the SET keyword when the query begin with it. This is to preserve the single line with queries like SET client_encoding TO "utf8"; - Add better SQL formatting of update queries by adding a new line after the SET keyword. Thanks to pilat66 for the report. - Update copyright and documentation. - Queries without application name are now stored under others application name. - Add report of number of queries by application if %a is specified in the log_line_prefix. - Add link menu to the request per database and limit the display of this information when there is more than one database. - Add report of requests per database. - Add report of user,remote client and application name to all request info. - Fix memory leak with option -b (--begin) and in incremental log parsing mode. - Remove duration part from log format auto-detection. Thanks to Guillaume Lelarge for the report. - Fix a performance issue on prettifying SQL queries that makes pgBagder several time slower that usual to generate the HTML output. Thanks to Vincent Laborie for the report. - Add missing SQL::Beautify paternity. - Add 'binary' format as input/output format. The binary output format allows to save log statistics in a non human readable file instead of an HTML or text file. These binary files might then be used as regular input files, combined or not, to produce a html or txt report. Thanks to Jehan Guillaume de Rorthais for the patch. - Remove port from the session regex pattern to match all lines. - Fix the progress bar. It was trying to use gunzip to get real file size for all formats (by default). Unbreak the bz2 format (that does not report real size) and add support for zip format. Thanks to Euler Taveira de Oliveira fort the patch. - Fix some typos and grammatical issues. Thanks to Euler Taveira de Oliveira fort the patch. - Improve SQL code highlighting and keywords detection merging change from pgFormatter project. - Add support to hostname or ip address in the client detection. Thanks to stuntmunkee for the report. - pgbadger will now only reports execute statement of the extended protocol (parse/bind/execute). Thanks to pierrestroh for the report. - Fix numerous typos as well as formatting and grammatical issues. Thanks to Thom Brown for the patch. - Add backward compatibility to obsolete --client command line option. If you were using the short option -c nothing is changed. - Fix issue with --dbclient and %h in log_line_prefix. Thanks to Julien Rouhaud for the patch. - Fix multiline progress bar output. - Allow usage of a dash into database, user and application names when prefix is used. Thanks to Vipul for the report. - Mouse over queries will now show in which database they are executed in the overviews (Slowest queries, Most frequent queries, etc. ). Thank to Dirk-Jan Bulsink for the feature request. - Fix missing keys on %cur_info hash. Thanks to Marc Cousin for the report. - Move opening file handle to log file into a dedicated function. Thanks to Marc Cousin for the patch. - Replace Ctrl+M by printable \r. Thanks to Marc Cousin for the report. 2012-11-13 - Version 2.2 This release add some major features like tsung output, speed improvement with csvlog, report of shut down events, new command line options to generate report excluding some user(s), to build report based on select queries only, to specify regex of the queries that must only be included in the report and to remove comments from queries. Lot of bug fixes, please upgrade. - Update PostgreSQL keywords list for 9.2 - Fix number of queries in progress bar with tsung output. - Remove obsolete syslog-ng and temporary syslog-ll log format added to fix some syslog autodetection issues. There is now just one syslog format: syslog, differences between syslog formats are detected and the log parser is adaptive. - Add comment about the check_incremental_position() method - Fix reports with empty graphs when log files were not in chronological order. - Add report of current total of queries and events parsed in progress bar. Thanks to Jehan-Guillaume de Rorthais for the patch. - Force pgBadger to use an require the XS version of Text::CSV instead of the Pure Perl implementation. It is a good bit faster thanks to David Fetter for the patch. Note that using csvlog is still a bit slower than syslog or stderr log format. - Fix several issue with tsung output. - Add report of shut down events - Add debug information on command line used to pipe compressed log file when -v is provide. - Add -U | --exclude-user command line option to generate report excluded user. Thanks to Birta Levente for the feature request. - Allow some options to be specified multiple time or be written as a coma separated list of value, here are these options: --dbname, --dbuser, --dbclient, --dbappname, --exclude_user. - Add -S | --select-only option to build report only on select queries. - Add first support to tsung output, see usage. Thanks to Guillaume Lelarge for the feature request. - Add --include-query and --include-file to specify regex of the queries that must only be included in the report. Thanks to Marc Cousin for the feature request. - Fix auto detection of log_duration and log_min_duration_statement format. - Fix parser issue with Windows logs without timezone information. Thanks to Nicolas Thauvin for the report. - Fix bug in %r = remote host and port log line prefix detection. Thanks to Hubert Depesz Lubaczewski for the report. - Add -C | --nocomment option to remove comment like /* ... */ from queries. Thanks to Hubert Depesz Lubaczewski for the feature request. - Fix escaping of log_line_prefix. Thanks to Hubert Depesz Lubaczewski for the patch. - Fix wrong detection of update queries when a query has a object names containing update and set. Thanks to Vincent Laborie for the report. 2012-10-10 - Version 2.1 This release add a major feature by allowing any custom log_line_prefix to be used by pgBadger. With stderr output you at least need to log the timestamp (%t) the pid (%p) and the session/line number (%l). Support to log_duration instead of log_min_duration_statement to allow reports simply based on duration and count report without query detail and report. Lot of bug fixes, please upgrade asap. - Add new --enable-log_min_duration option to force pgbadger to use lines generated by the log_min_duration_statement even if the log_duration format is autodetected. Useful if you use both but do not log all queries. Thanks to Vincent Laborie for the feature request. - Add syslog-ng format to better handle syslog traces with notation like: [ID * local2.info]. It is autodetected but can be forced in the -f option with value set to: syslog-ng. - Add --enable-log_duration command line option to force pgbadger to only use the log_duration trace even if log_min_duration_statement traces are autodetected. - Fix display of empty hourly graph when no data were found. - Remove query type report when log_duration is enabled. - Fix a major bug in query with bind parameter. Thanks to Marc Cousin for the report. - Fix detection of compressed log files and allow automatic detection and uncompress of .gz, .bz2 and .zip files. - Add gunzip -l command to find the real size of a gzip compressed file. - Fix log_duration only reports to not take care about query detail but just count and duration. - Fix issue with compressed csvlog. Thanks to Philip Freeman for the report. - Allow usage of log_duration instead of log_min_duration_statement to just collect statistics about the number of queries and their time. Thanks to Vincent Laborie for the feature request. - Fix issue on syslog format and autodetect with additional info like: [ID * local2.info]. Thanks to kapsalar for the report. - Removed unrecognized log line generated by deadlock_timeout. - Add missing information about unsupported csv log input from stdin. It must be read from a file. Thank to Philip Freeman for the report. - Fix issue #28: Illegal division by zero with log file without query and txt output. Thanks to rlowe for the report. - Update documentation about the -N | --appname option. - Rename --name option into --appname. Thanks to Guillaume Lellarge for the patch. - Fix min/max value in xasis that was always represented 2 days by default. Thanks to Casey Allen Shobe for the report. - Fix major bug when running pgbadger with the -e option. Thanks to Casey Allen Shobe for the report and the great help - Change project url to http://dalibo.github.com/pgbadger/. Thanks to Damien Clochard for this new hosting. - Fix lot of issues in CSV parser and force locale to be C. Thanks to Casey Allen Shobe for the reports. - Improve speed with custom log_line_prefix. - Merge pull request #26 from elementalvoid/helpdoc-fix - Fixed help text for --exclude-file. Old help text indicated that the option name was --exclude_file which was incorrect. - Remove the obsolete --regex-user and --regex-db options that was used to specify a search pattern in the log_line_prefix to find the user and db name. This is replaced by the --prefix option. - Replace Time column report header by Hour. - Fix another issue in log_line_prefix parser with stderr format - Add a more complex example using log_line_prefix - Fix log_line_prefix issue when using timepstamp with millisecond. - Add support to use any custom log_line_prefix with new option -p or --prefix. See README for an example. - Fix false autodetection of CSV format when log_statement is enable or in possible other cases. This was resulting in error: "FATAL: cannot use CSV". Thanks to Thomas Reiss for the report. - Fix display of empty graph of connections per seconds - Allow character : in log line prefix, it will no more break the log parsing. Thanks to John Rouillard for the report. - Add report of configuration parameter changes into the errors report and change errors report by events report to handle important messages that are not errors. - Allow pgbadger to recognize " autovacuum launcher" messages. 2012-08-21 - version 2.0 This major version adds some changes not backward compatible with previous versions. Options -p and -g are not more used as progress bar and graphs generation are enabled by default now. The obsolete -l option use to specify the log file to parse has been reused to specify an incremental file. Outside these changes and some bug fix there's also new features: * Using an incremental file with -l option allow to parse multiple time a single log file and to "seek" at the last line parsed during the previous run. Useful if you have a log rotation not sync with your pgbadger run. For exemple you can run somthing like this: pgbadger `find /var/log/postgresql/ -name "postgresql*" -mtime -7 -type f` \ -o report_`date +%F`.html -l /var/run/pgbadger/last_run.log * All queries diplayed in the HTML report are now clickable to display or hide a nice SQL query format. This is called SQL format beautifier. * CSV log parser have been entirely rewritten to handle csv with multiline. Every one should upgrade. - Change license from BSD like to PostgreSQL license. Request from Robert Treat. - Fix wrong pointer on Connections per host menu. Reported by Jean-Paul Argudo. - Small fix for sql formatting adding scrollbars. Patch by Julien Rouhaud. - Add SQL format beautifier on SQL queries. When you will click on a query it will be beautified. Patch by Gilles Darold - The progress bar is now enabled by default, the -p option has been removed. Use -q | --quiet to disable it. Patch by Gilles Darold. - Graphs are now generated by default for HTML output, option -g as been remove and option -G added to allow disabling graph generation. Request from Julien Rouhaud, patch by Gilles Darold. - Remove option -g and -p to the documentation. Patch by Gilles Darold. - Fix case sensitivity in command line options. Patch by Julien Rouhaud. - Add -T|--title option to change report title. Patch by Yury Bushmelev. - Add new option --exclude-file to exclude specific commands with regex stated in a file. This is a rewrite by Gilles Darold of the neoeahit (Vipul) patch. - CSV log parser have been entirely rewritten to handle csv with multi line, it also adds approximative duration for csvlog. Reported by Ludhimila Kendrick, patch by Gilles Darold. - Alphabetical reordering of options list in method usage() and documentation. Patch by Gilles Darold. - Remove obsolete -l | --logfile command line option, the -l option will be reused to specify an incremental file. Patch by Gilles Darold. - Add -l | --last-parsed options to allow incremental run of pgbadger. Patch by Gilles Darold. - Replace call to timelocal_nocheck by timegm_nocheck, to convert date time into second from the epoch. This should fix timezone issue. Patch by Gilles Darold. - Change regex on log parser to allow missing ending space in log_line_prefix. This seems a common mistake. Patch by Gilles Darold. - print warning when an empty log file is found. Patch by Gilles Darold. - Add perltidy rc file to format pgbadger Perl code. Patch from depesz. 2012-07-15 - version 1.2 This version adds some reports and fixes a major issue in log parser. Every one should upgrade. - Rewrite this changelog to be human readable. - Add -v | --verbose to enable debug mode. It is now disable by default - Add hourly report of checkpoint warning when checkpoints are occuring too frequently, it will display the hourly count and the average occuring time. - Add new report that sums the messages by log types. The report shows the number of messages of each log type, and a percentage. It also displays a pie graph. Patch by Guillaume Lelarge. - Add missing pie graph on locks by type report. - Format pie mouse track to display values only. - Fix graph download button id on new connection graph. - Add trackFormatter to flotr2 line graphs to show current x/y values. - Fix issue on per minute minimum value. - Add a note about Windows Os and zcat as well as a more general note about using compressed log file in other format than gzip. - Complete rewrite of the log parser to handle unordered log lines. Data are now stored by pid before and added to the global statistics at end. Error report now include full details, statements, contexts and hints when available. Deadlock are also fully reported with the concerned queries. - Fix miss handling of multi lines queries on syslog. - Add -a|--average option to configure the per minutes average interval for queries and connexions. If you want the average to be calculated each minutes instead of the 5 per default, use --average 1 or for the default --average 5. If you want average per hour set it to 60. - Add hourly statistics of connections and sessions as well as a chart about the number of connection per second (5 minutes average). - Allow OTHERS type of queries lower than 2% to be include in the sum of types < 2%. - Add autodetection of syslog ident name if different than the default "postgres" and that there is just one ident name in the log. - Remove syslog replacement of tabulation by #011 still visible when there was multiple tabulation. - Fix autodetection of log format syslog with single-digit day number in date. - Add ChangeLog to MANIFEST and change URI in html footer. - Check pgBadger compatibility with Windows Oses. Run perfectly. 2012-07-04 - version 1.1 This release fixes lot of issues and adds several main features. New feature: - Add possibility to get log from stdin - Change syslog parsing regex to allow log timestamp in log_line_prefix very often forgotten when log destination is changed from stderr to syslog. - Add documentation for the -z | --zcat command line option. - Allow `zcat` location to be specified via `--zcat` - David E. Wheeler - Add --disable-session,--disable-connection and disable-checkpoint command line options to remove their respective reports from the output - Add --disable-query command line option to remove queries statistics from the output - Add --disable-hourly command line option to remove hourly statistics from the output - Add --disable-error command line option to remove error report from the output - Add --exclude-query option to exclude types of queries by specifying a regex - Set thousand separator and decimal separator to be locale dependant - Add -w option to only report errors - Add Makefile.PL and full POD documentation to the project - Allow multiple log files from command line - Add simple csvlog support - Alex Hunsaker - Hourly report for temporary files and checkpoints have moved in a separate table. - Add hourly connections and sessions statistics. - Add a chart about the number of connections per seconds. Bug fix: - Add information about log format requirement (lc_message = 'C'). Reported by Alain Benard. - Fix for begin/end dates with single digit day using syslog. Patch by Joseph Marlin. - Fix handle of syslog dates with single-digit day number. Patch by Denis Orlikhin. - Fix many English syntax in error messages and documentation. Patch by Joseph Marlin. - Fix non terminated TH html tag in checkpoint hourly table. Reported by Joseph Marlin. - "Log file" section will now only report first and last log file parsed - Fix empty output in hourly temporary file stats. - Fix wrapping query that goes out of the table and makes the window scroll horizontally. Asked by Isaac Reuben. - Fix code where != was replaced by $$CLASSSY0A$$!=$$CLASSSY0B$$ in the output. Reported by Isaac Reuben - Fix and review text report output. - Fix an issue in SQL code highligh replacement. - Complete review of the HTML output. - Add .gitignore for swap files. Patch by Vincent Picavet - Fix wrong variable for user and database filter. Patch by Vincent Picavet. - Change default regexp for user and db to be able to detect both. Patch by Vincent Picavet. - Fix false cur_date when using syslog and allow -b and -e options to work. Patch by Vincent Picavet. - Fix some case where logs where not detected as PostgreSQL log lines. - Added explanation for --begin and --end datetime setting. Patch by ragged. - Added -v / --version. Patch by ragged. - Fix usage information and presentation in README file. 2012-05-04 - version to 1.0 First public release of pgBadger. New feature: - Add graph of ckeckpoint Wal files usage (added, removed, recycled). - Add --image-format to allow the change of the default png image format to jpeg. - Allow download of all pie graphics as images. - Add --pie-limit to sum all data lower than this percentage limit to avoid label overlap. - Allow download of graphics as PNG images. - Replace GD::Graph by the Flotr2 javascript library to draw graphics. Patch by Guillaume Lelarge - Add pie graphs for session, database, user and host. Add a --quiet option to remove debug output and --progress to show a progress bar during log parsing - Add pie graph for Queries by type. - Add graph for checkpoint write buffer per hours - Allow log parsing without any log_line_prefix and extend it to be defined by the user. Custom log_line prefix can be parsed using user defined regex with command line option --regex-db and --regex-user. For exemple the default regex of pgbadger to parse user and db name from log_line_prefix can be written like this: pgbadger -l mylogfile.log --regex-user="user=([^,]*)," \ --regex-db="db=([^,]*)" - Separe log_line_prefix from log level part in the parser to extend log_line_prefix parsing - If there is just one argument, assume it is the logfile and use default value for all other parameters - Add autodetection of log format (syslog or stderr) if none is given with option -f - Add --outfile option to dump output to a file instead of stdout. Default filename is out.html or out.txt following the output format. To dump to stdout set filename to - - Add --version command line option to show current pgbadger version. Bug fix: - Rearrange x and y axis - Fix legend opacity on graphics - Rearrange Overall stats view - Add more "normalization" on errors messages - Fix samples error with normalyzed error instead of real error message - Fix an other average size of temporary file decimal limit - Force quiet mode when --progress is used - Fix per sessions graphs - Fix sort order of days/hours into hours array - Fix sort order of days into graphics - Remove display of locks, sessions and connections statistics when none are available - Fix display of empty column of checkpoint when no checkpoint was found in log file pgbadger-11.7/HACKING.md000066400000000000000000000027171417325540000146130ustar00rootroot00000000000000# Contributing on pgBadger Thanks for your attention on pgBadger ! You need Perl Module JSON::XS to run the full test suite. You can install it on a Debian like system using: sudo apt-get install libjson-xs-perl or in RPM like system using: sudo yum install perl-JSON-XS pgBadger has a TAP compatible test suite executed by `prove`: $ prove t/01_lint.t ......... ok t/02_basics.t ....... ok t/03_consistency.t .. ok All tests successful. Files=3, Tests=13, 6 wallclock secs ( 0.01 usr 0.01 sys + 5.31 cusr 0.16 csys = 5.49 CPU) Result: PASS $ or if you prefer to run test manually: $ perl Makefile.PL && make test Checking if your kit is complete... Looks good Generating a Unix-style Makefile Writing Makefile for pgBadger Writing MYMETA.yml and MYMETA.json cp pgbadger blib/script/pgbadger "/usr/bin/perl" -MExtUtils::MY -e 'MY->fixin(shift)' -- blib/script/pgbadger PERL_DL_NONLAZY=1 "/usr/bin/perl" "-MExtUtils::Command::MM" "-MTest::Harness" "-e" "undef *Test::Harness::Switches; test_harness(0, 'blib/lib', 'blib/arch')" t/*.t t/01_lint.t ......... ok t/02_basics.t ....... ok t/03_consistency.t .. ok All tests successful. Files=3, Tests=13, 6 wallclock secs ( 0.03 usr 0.00 sys + 5.39 cusr 0.14 csys = 5.56 CPU) Result: PASS $ make clean && rm Makefile.old Please contribute a regression test when you fix a bug or add a feature. Thanks! pgbadger-11.7/LICENSE000066400000000000000000000016161417325540000142270ustar00rootroot00000000000000Copyright (c) 2012-2022, Gilles Darold Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. IN NO EVENT SHALL Darold BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF Darold HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Darold SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND Darold HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. pgbadger-11.7/MANIFEST000066400000000000000000000001211417325540000143410ustar00rootroot00000000000000LICENSE Makefile.PL MANIFEST META.yml pgbadger README doc/pgBadger.pod ChangeLog pgbadger-11.7/META.yml000066400000000000000000000003721417325540000144710ustar00rootroot00000000000000name: pgBadger version: 11.7 version_from: pgbadger installdirs: site recommends: Text::CSV_XS: 0 JSON::XS: 0 distribution_type: script generated_by: ExtUtils::MakeMaker version 6.17 pgbadger-11.7/Makefile.PL000066400000000000000000000043311417325540000151710ustar00rootroot00000000000000use ExtUtils::MakeMaker; # See lib/ExtUtils/MakeMaker.pm for details of how to influence # the contents of the Makefile that is written. use strict; my @ALLOWED_ARGS = ('INSTALLDIRS','DESTDIR'); # Parse command line arguments and store them as environment variables while ($_ = shift) { my ($k,$v) = split(/=/, $_, 2); if (grep(/^$k$/, @ALLOWED_ARGS)) { $ENV{$k} = $v; } } $ENV{DESTDIR} =~ s/\/$//; # Default install path my $DESTDIR = $ENV{DESTDIR} || ''; my $INSTALLDIRS = $ENV{INSTALLDIRS} || 'site'; my %merge_compat = (); if ($ExtUtils::MakeMaker::VERSION >= 6.46) { %merge_compat = ( 'META_MERGE' => { resources => { homepage => 'http://pgbadger.darold.net/', repository => { type => 'git', git => 'git@github.com:darold/pgbadger.git', web => 'https://github.com/darold/pgbadger', }, }, } ); } sub MY::postamble { return <<'EOMAKE'; USE_MARKDOWN=$(shell which pod2markdown) README: doc/pgBadger.pod pod2text $^ > $@ ifneq ("$(USE_MARKDOWN)", "") cat doc/pgBadger.pod | grep "=head1 " | sed 's/^=head1 \(.*\)/- [\1](#\1)/' | sed 's/ /-/g' | sed 's/--/- /' > $@.md sed -i '1s/^/### TABLE OF CONTENTS\n\n/' $@.md echo >> $@.md pod2markdown $^ | sed 's/^## /#### /' | sed 's/^# /### /' >> $@.md else $(warning You must install pod2markdown to generate README.md from doc/pgBadger.pod) endif .INTERMEDIATE: doc/synopsis.pod doc/synopsis.pod: Makefile pgbadger echo "=head1 SYNOPSIS" > $@ ./pgbadger --help >> $@ echo "=head1 DESCRIPTION" >> $@ sed -i.bak 's/ +$$//g' $@ rm $@.bak .PHONY: doc/pgBadger.pod doc/pgBadger.pod: doc/synopsis.pod Makefile sed -i.bak '/^=head1 SYNOPSIS/,/^=head1 DESCRIPTION/d' $@ sed -i.bak '4r $<' $@ rm $@.bak EOMAKE } WriteMakefile( 'DISTNAME' => 'pgbadger', 'NAME' => 'pgBadger', 'VERSION_FROM' => 'pgbadger', 'dist' => { 'COMPRESS'=>'gzip -9f', 'SUFFIX' => 'gz', 'ZIP'=>'/usr/bin/zip','ZIPFLAGS'=>'-rl' }, 'AUTHOR' => 'Gilles Darold (gilles@darold.net)', 'ABSTRACT' => 'pgBadger - PostgreSQL log analysis report', 'EXE_FILES' => [ qw(pgbadger) ], 'MAN1PODS' => { 'doc/pgBadger.pod' => 'blib/man1/pgbadger.1p' }, 'DESTDIR' => $DESTDIR, 'INSTALLDIRS' => $INSTALLDIRS, 'clean' => {}, %merge_compat ); pgbadger-11.7/README000066400000000000000000001147571417325540000141150ustar00rootroot00000000000000NAME pgBadger - a fast PostgreSQL log analysis report SYNOPSIS Usage: pgbadger [options] logfile [...] PostgreSQL log analyzer with fully detailed reports and graphs. Arguments: logfile can be a single log file, a list of files, or a shell command returning a list of files. If you want to pass log content from stdin use - as filename. Note that input from stdin will not work with csvlog. Options: -a | --average minutes : number of minutes to build the average graphs of queries and connections. Default 5 minutes. -A | --histo-average min: number of minutes to build the histogram graphs of queries. Default 60 minutes. -b | --begin datetime : start date/time for the data to be parsed in log (either a timestamp or a time) -c | --dbclient host : only report on entries for the given client host. -C | --nocomment : remove comments like /* ... */ from queries. -d | --dbname database : only report on entries for the given database. -D | --dns-resolv : client ip addresses are replaced by their DNS name. Be warned that this can really slow down pgBadger. -e | --end datetime : end date/time for the data to be parsed in log (either a timestamp or a time) -E | --explode : explode the main report by generating one report per database. Global information not related to a database are added to the postgres database report. -f | --format logtype : possible values: syslog, syslog2, stderr, jsonlog, cvs, pgbouncer, logplex, rds and redshift. Use this option when pgBadger is not able to detect the log format. -G | --nograph : disable graphs on HTML output. Enabled by default. -h | --help : show this message and exit. -H | --html-outdir path: path to directory where HTML report must be written in incremental mode, binary files stay on directory defined with -O, --outdir option. -i | --ident name : programname used as syslog ident. Default: postgres -I | --incremental : use incremental mode, reports will be generated by days in a separate directory, --outdir must be set. -j | --jobs number : number of jobs to run at same time. Run as single by default or when working with csvlog. -J | --Jobs number : number of log file to parse in parallel. Process one file at a time by default or when csvlog is used. -l | --last-parsed file: allow incremental log parsing by registering the last datetime and line parsed. Useful if you want to watch errors since last run or if you want one report per day with a log rotated each week. -L | --logfile-list file:file containing a list of log file to parse. -m | --maxlength size : maximum length of a query, it will be restricted to the given size. Default truncate size is 100000. -M | --no-multiline : do not collect multiline statement to avoid garbage especially on errors that generate a huge report. -n | --nohighlight : disable SQL code highlighting. -N | --appname name : only report on entries for given application name -o | --outfile filename: define the filename for the output. Default depends on the output format: out.html, out.txt, out.bin, out.json or out.tsung. This option can be used multiple time to output several format. To use json output the Perl module JSON::XS must be installed, To dump output to stdout use - as filename. -O | --outdir path : directory where out file must be saved. -p | --prefix string : the value of your custom log_line_prefix as defined in your postgresql.conf. Only use it if you aren't using one of the standard prefixes specified in the pgBadger documentation, such as if your prefix includes additional variables like client ip or application name. See examples below. -P | --no-prettify : disable SQL queries prettify formatter. -q | --quiet : don't print anything to stdout, not even a progress bar. -Q | --query-numbering : add numbering of queries to the output when using options --dump-all-queries or --normalized-only. -r | --remote-host ip : set the host where to execute the cat command on remote logfile to parse locally the file. -R | --retention N : number of weeks to keep in incremental mode. Default to 0, disabled. Used to set the number of weeks to keep in output directory. Older weeks and days directory are automatically removed. -s | --sample number : number of query samples to store. Default: 3. -S | --select-only : only report SELECT queries. -t | --top number : number of queries to store/display. Default: 20. -T | --title string : change title of the HTML page report. -u | --dbuser username : only report on entries for the given user. -U | --exclude-user username : exclude entries for the specified user from report. Can be used multiple time. -v | --verbose : enable verbose or debug mode. Disabled by default. -V | --version : show pgBadger version and exit. -w | --watch-mode : only report errors just like logwatch could do. -W | --wide-char : encode html output of queries into UTF8 to avoid Perl message "Wide character in print". -x | --extension : output format. Values: text, html, bin, json or tsung. Default: html -X | --extra-files : in incremental mode allow pgBadger to write CSS and JS files in the output directory as separate files. -z | --zcat exec_path : set the full path to the zcat program. Use it if zcat or bzcat or unzip is not in your path. -Z | --timezone +/-XX : Set the number of hours from GMT of the timezone. Use this to adjust date/time in JavaScript graphs. --pie-limit num : pie data lower than num% will show a sum instead. --exclude-query regex : any query matching the given regex will be excluded from the report. For example: "^(VACUUM|COMMIT)" You can use this option multiple times. --exclude-file filename: path of the file which contains all the regex to use to exclude queries from the report. One regex per line. --include-query regex : any query that does not match the given regex will be excluded from the report. You can use this option multiple times. For example: "(tbl1|tbl2)". --include-file filename: path of the file which contains all the regex of the queries to include from the report. One regex per line. --disable-error : do not generate error report. --disable-hourly : do not generate hourly report. --disable-type : do not generate report of queries by type, database or user. --disable-query : do not generate query reports (slowest, most frequent, queries by users, by database, ...). --disable-session : do not generate session report. --disable-connection : do not generate connection report. --disable-lock : do not generate lock report. --disable-temporary : do not generate temporary report. --disable-checkpoint : do not generate checkpoint/restartpoint report. --disable-autovacuum : do not generate autovacuum report. --charset : used to set the HTML charset to be used. Default: utf-8. --csv-separator : used to set the CSV field separator, default: , --exclude-time regex : any timestamp matching the given regex will be excluded from the report. Example: "2013-04-12 .*" You can use this option multiple times. --include-time regex : only timestamps matching the given regex will be included in the report. Example: "2013-04-12 .*" You can use this option multiple times. --exclude-db name : exclude entries for the specified database from report. Example: "pg_dump". Can be used multiple time. --exclude-appname name : exclude entries for the specified application name from report. Example: "pg_dump". Can be used multiple time. --exclude-line regex : pgBadger will start to exclude any log entry that will match the given regex. Can be used multiple time. --exclude-client name : exclude log entries for the specified client ip. Can be used multiple time. --anonymize : obscure all literals in queries, useful to hide confidential data. --noreport : prevent pgBadger to create reports in incremental mode. --log-duration : force pgBadger to associate log entries generated by both log_duration = on and log_statement = 'all' --enable-checksum : used to add a md5 sum under each query report. --journalctl command : command to use to replace PostgreSQL logfile by a call to journalctl. Basically it might be: journalctl -u postgresql-9.5 --pid-dir path : set the path where the pid file must be stored. Default /tmp --pid-file file : set the name of the pid file to manage concurrent execution of pgBadger. Default: pgbadger.pid --rebuild : used to rebuild all html reports in incremental output directories where there's binary data files. --pgbouncer-only : only show PgBouncer related menu in the header. --start-monday : in incremental mode, calendar's weeks start on a sunday. Use this option to start on a monday. --iso-week-number : in incremental mode, calendar's weeks start on a monday and respect the ISO 8601 week number, range 01 to 53, where week 1 is the first week that has at least 4 days in the new year. --normalized-only : only dump all normalized query to out.txt --log-timezone +/-XX : Set the number of hours from GMT of the timezone that must be used to adjust date/time read from log file before beeing parsed. Using this option make more difficult log search with a date/time. --prettify-json : use it if you want json output to be prettified. --month-report YYYY-MM : create a cumulative HTML report over the specified month. Requires incremental output directories and the presence of all necessary binary data files --day-report YYYY-MM-DD: create an HTML report over the specified day. Requires incremental output directories and the presence of all necessary binary data files --noexplain : do not process lines generated by auto_explain. --command CMD : command to execute to retrieve log entries on stdin. pgBadger will open a pipe to the command and parse log entries generated by the command. --no-week : inform pgbadger to not build weekly reports in incremental mode. Useful if it takes too much time. --explain-url URL : use it to override the url of the graphical explain tool. Default: http://explain.depesz.com/?is_public=0&is_anon=0&plan= --tempdir DIR : set directory where temporary files will be written Default: File::Spec->tmpdir() || '/tmp' --no-process-info : disable changing process title to help identify pgbadger process, some system do not support it. --dump-all-queries : dump all queries found in the log file replacing bind parameters are included in the queries at their respective placeholders position. --keep-comments : do not remove comments from normalized queries. It can be useful if you want to distinguish between same normalized queries. --no-progressbar : disable progressbar. pgBadger is able to parse a remote log file using a passwordless ssh connection. Use the -r or --remote-host to set the host ip address or hostname. There's also some additional options to fully control the ssh connection. --ssh-program ssh path to the ssh program to use. Default: ssh. --ssh-port port ssh port to use for the connection. Default: 22. --ssh-user username connection login name. Default to running user. --ssh-identity file path to the identity file to use. --ssh-timeout second timeout to ssh connection failure. Default 10 secs. --ssh-option options list of -o options to use for the ssh connection. Options always used: -o ConnectTimeout=$ssh_timeout -o PreferredAuthentications=hostbased,publickey Log file to parse can also be specified using an URI, supported protocol are http[s] and [s]ftp. The curl command will be used to download the file and the file will be parsed during download. The ssh protocol is also supported and will use the ssh command like with the remote host use. See examples bellow. Examples: pgbadger /var/log/postgresql.log pgbadger /var/log/postgres.log.2.gz /var/log/postgres.log.1.gz /var/log/postgres.log pgbadger /var/log/postgresql/postgresql-2012-05-* pgbadger --exclude-query="^(COPY|COMMIT)" /var/log/postgresql.log pgbadger -b "2012-06-25 10:56:11" -e "2012-06-25 10:59:11" /var/log/postgresql.log cat /var/log/postgres.log | pgbadger - # Log prefix with stderr log output pgbadger --prefix '%t [%p]: user=%u,db=%d,client=%h' /pglog/postgresql-2012-08-21* pgbadger --prefix '%m %u@%d %p %r %a : ' /pglog/postgresql.log # Log line prefix with syslog log output pgbadger --prefix 'user=%u,db=%d,client=%h,appname=%a' /pglog/postgresql-2012-08-21* # Use my 8 CPUs to parse my 10GB file faster, much faster pgbadger -j 8 /pglog/postgresql-10.1-main.log Use URI notation for remote log file: pgbadger http://172.12.110.1//var/log/postgresql/postgresql-10.1-main.log pgbadger ftp://username@172.12.110.14/postgresql-10.1-main.log pgbadger ssh://username@172.12.110.14:2222//var/log/postgresql/postgresql-10.1-main.log* You can use together a local PostgreSQL log and a remote pgbouncer log file to parse: pgbadger /var/log/postgresql/postgresql-10.1-main.log ssh://username@172.12.110.14/pgbouncer.log Generate Tsung sessions XML file with select queries only: pgbadger -S -o sessions.tsung --prefix '%t [%p]: user=%u,db=%d ' /pglog/postgresql-10.1.log Reporting errors every week by cron job: 30 23 * * 1 /usr/bin/pgbadger -q -w /var/log/postgresql.log -o /var/reports/pg_errors.html Generate report every week using incremental behavior: 0 4 * * 1 /usr/bin/pgbadger -q `find /var/log/ -mtime -7 -name "postgresql.log*"` -o /var/reports/pg_errors-`date +\%F`.html -l /var/reports/pgbadger_incremental_file.dat This supposes that your log file and HTML report are also rotated every week. Or better, use the auto-generated incremental reports: 0 4 * * * /usr/bin/pgbadger -I -q /var/log/postgresql/postgresql.log.1 -O /var/www/pg_reports/ will generate a report per day and per week. In incremental mode, you can also specify the number of week to keep in the reports: /usr/bin/pgbadger --retention 2 -I -q /var/log/postgresql/postgresql.log.1 -O /var/www/pg_reports/ If you have a pg_dump at 23:00 and 13:00 each day during half an hour, you can use pgBadger as follow to exclude these period from the report: pgbadger --exclude-time "2013-09-.* (23|13):.*" postgresql.log This will help avoid having COPY statements, as generated by pg_dump, on top of the list of slowest queries. You can also use --exclude-appname "pg_dump" to solve this problem in a simpler way. You can also parse journalctl output just as if it was a log file: pgbadger --journalctl 'journalctl -u postgresql-9.5' or worst, call it from a remote host: pgbadger -r 192.168.1.159 --journalctl 'journalctl -u postgresql-9.5' you don't need to specify any log file at command line, but if you have other PostgreSQL log file to parse, you can add them as usual. To rebuild all incremental html reports after, proceed as follow: rm /path/to/reports/*.js rm /path/to/reports/*.css pgbadger -X -I -O /path/to/reports/ --rebuild it will also update all resource files (JS and CSS). Use -E or --explode if the reports were built using this option. pgBadger also support Heroku PostgreSQL logs using logplex format: heroku logs -p postgres | pgbadger -f logplex -o heroku.html - this will stream Heroku PostgreSQL log to pgbadger through stdin. pgBadger can auto detect RDS and cloudwatch PostgreSQL logs using rds format: pgbadger -f rds -o rds_out.html rds.log CloudSQL Postgresql logs it's fairly normal PostgreSQL log but encapsulated in JSON format. It is auto detected too by pgBagder but in case you need to force the log format, use `jsonlog` pgbadger -f jsonlog -o cloudsql_out.html cloudsql.log This is the same than with the jsonlog extension, the json format is different but pgBadger can parse both format. To create a cumulative report over a month use command: pgbadger --month-report 2919-05 /path/to/incremantal/reports/ this will add a link to the month name into the calendar view in incremental reports to look at report for month 2019 May. Use -E or --explode if the reports were built using this option. DESCRIPTION pgBadger is a PostgreSQL log analyzer built for speed providing fully detailed reports based on your PostgreSQL log files. It's a small standalone Perl script that outperforms any other PostgreSQL log analyzer. It is written in pure Perl and uses a JavaScript library (flotr2) to draw graphs so that you don't need to install any additional Perl modules or other packages. Furthermore, this library gives us more features such as zooming. pgBadger also uses the Bootstrap JavaScript library and the FontAwesome webfont for better design. Everything is embedded. pgBadger is able to autodetect your log file format (syslog, stderr, csvlog or jsonlog) if the file is long enough. It is designed to parse huge log files as well as compressed files. Supported compressed format are gzip, bzip2, lz4, xz, zip and zstd. For the xz format you must have an xz version upper than 5.05 that supports the --robot option. In order pgbadger determine uncompressed file size with lz4, file must be compressed with --content-size option. For the complete list of features see below. All charts are zoomable and can be saved as PNG images. You can also limit pgBadger to only report errors or remove any part of the report using command line options. pgBadger supports any custom format set into the log_line_prefix directive of your postgresql.conf file as long as it at least specify the %t and %p patterns. pgBadger allows parallel processing of a single log file or multiple files through the use of the -j option specifying the number of CPUs. If you want to save system performance you can also use log_duration instead of log_min_duration_statement to have reports on duration and number of queries only. FEATURE pgBadger reports everything about your SQL queries: Overall statistics. The most frequent waiting queries. Queries that waited the most. Queries generating the most temporary files. Queries generating the largest temporary files. The slowest queries. Queries that took up the most time. The most frequent queries. The most frequent errors. Histogram of query times. Histogram of sessions times. Users involved in top queries. Applications involved in top queries. Queries generating the most cancellation. Queries most cancelled. The most time consuming prepare/bind queries The following reports are also available with hourly charts divided into periods of five minutes: SQL queries statistics. Temporary file statistics. Checkpoints statistics. Autovacuum and autoanalyze statistics. Cancelled queries. Error events (panic, fatal, error and warning). Error class distribution. There are also some pie charts about distribution of: Locks statistics. Queries by type (select/insert/update/delete). Distribution of queries type per database/application Sessions per database/user/client/application. Connections per database/user/client/application. Autovacuum and autoanalyze per table. Queries per user and total duration per user. All charts are zoomable and can be saved as PNG images. SQL queries reported are highlighted and beautified automatically. pgBadger is also able to parse PgBouncer log files and to create the following reports: Request Throughput Bytes I/O Throughput Queries Average duration Simultaneous sessions Histogram of sessions times Sessions per database Sessions per user Sessions per host Established connections Connections per database Connections per user Connections per host Most used reserved pools Most Frequent Errors/Events You can also have incremental reports with one report per day and a cumulative report per week. Two multiprocess modes are available to speed up log parsing, one using one core per log file, and the second using multiple cores to parse a single file. These modes can be combined. Histogram granularity can be adjusted using the -A command line option. By default they will report the mean of each top queries/errors occurring per hour, but you can specify the granularity down to the minute. pgBadger can also be used in a central place to parse remote log files using a passwordless SSH connection. This mode can be used with compressed files and in the multiprocess per file mode (-J) but can not be used with the CSV log format. REQUIREMENT pgBadger comes as a single Perl script - you do not need anything other than a modern Perl distribution. Charts are rendered using a JavaScript library so you don't need anything other than a web browser. Your browser will do all the work. If you planned to parse PostgreSQL CSV log files you might need some Perl Modules: Text::CSV_XS - to parse PostgreSQL CSV log files. This module is optional, if you don't have PostgreSQL log in the CSV format you don't need to install it. If you want to export statistics as JSON file you need an additional Perl module: JSON::XS - JSON serialising/deserialising, done correctly and fast This module is optional, if you don't select the json output format you don't need to install it. You can install it on a Debian like system using: sudo apt-get install libjson-xs-perl and in RPM like system using: sudo yum install perl-JSON-XS Compressed log file format is autodetected from the file extension. If pgBadger find a gz extension it will use the zcat utility, with a bz2 extension it will use bzcat, with lz4 it will use lz4cat, with zst it will use zstdcat and if the file extension is zip or xz then the unzip or xz utilities will be used. If those utilities are not found in the PATH environment variable then use the --zcat command line option to change this path. For example: --zcat="/usr/local/bin/gunzip -c" or --zcat="/usr/local/bin/bzip2 -dc" --zcat="C:\tools\unzip -p" By default pgBadger will use the zcat, bzcat, lz4cat, zstdcat and unzip utilities following the file extension. If you use the default autodetection compress format you can mixed gz, bz2, lz4, xz, zip or zstd files. Specifying a custom value to --zcat option will remove this feature of mixed compressed format. Note that multiprocessing can not be used with compressed files or CSV files as well as under Windows platform. INSTALLATION Download the tarball from GitHub and unpack the archive as follow: tar xzf pgbadger-11.x.tar.gz cd pgbadger-11.x/ perl Makefile.PL make && sudo make install This will copy the Perl script pgbadger to /usr/local/bin/pgbadger by default and the man page into /usr/local/share/man/man1/pgbadger.1. Those are the default installation directories for 'site' install. If you want to install all under /usr/ location, use INSTALLDIRS='perl' as an argument of Makefile.PL. The script will be installed into /usr/bin/pgbadger and the manpage into /usr/share/man/man1/pgbadger.1. For example, to install everything just like Debian does, proceed as follows: perl Makefile.PL INSTALLDIRS=vendor By default INSTALLDIRS is set to site. POSTGRESQL CONFIGURATION You must enable and set some configuration directives in your postgresql.conf before starting. You must first enable SQL query logging to have something to parse: log_min_duration_statement = 0 Here every statement will be logged, on a busy server you may want to increase this value to only log queries with a longer duration. Note that if you have log_statement set to 'all' nothing will be logged through the log_min_duration_statement directive. See the next chapter for more information. pgBadger supports any custom format set into the log_line_prefix directive of your postgresql.conf file as long as it at least specify a time escape sequence (%t, %m or %n) and the process related escape sequence (%p or %c). For example, with 'stderr' log format, log_line_prefix must be at least: log_line_prefix = '%t [%p]: ' Log line prefix could add user, database name, application name and client ip address as follows: log_line_prefix = '%t [%p]: user=%u,db=%d,app=%a,client=%h ' or for syslog log file format: log_line_prefix = 'user=%u,db=%d,app=%a,client=%h ' Log line prefix for stderr output could also be: log_line_prefix = '%t [%p]: db=%d,user=%u,app=%a,client=%h ' or for syslog output: log_line_prefix = 'db=%d,user=%u,app=%a,client=%h ' You need to enable other parameters in postgresql.conf to get more information from your log files: log_checkpoints = on log_connections = on log_disconnections = on log_lock_waits = on log_temp_files = 0 log_autovacuum_min_duration = 0 log_error_verbosity = default Do not enable log_statement as its log format will not be parsed by pgBadger. Of course your log messages should be in English with or without locale support: lc_messages='en_US.UTF-8' lc_messages='C' pgBadger parser do not support other locale like 'fr_FR.UTF-8' for example. LOG STATEMENTS Considerations about log_min_duration_statement, log_duration and log_statement configuration directives. If you want the query statistics to include the actual query strings, you must set log_min_duration_statement to 0 or more milliseconds. If you just want to report duration and number of queries and don't want all details about queries, set log_min_duration_statement to -1 to disable it and enable log_duration in your postgresql.conf file. If you want to add the most common request report you can either choose to set log_min_duration_statement to a higher value or choose to enable log_statement. Enabling log_min_duration_statement will add reports about slowest queries and queries that took up the most time. Take care that if you have log_statement set to 'all' nothing will be logged with log_min_duration_statement. Warning: Do not enable both log_min_duration_statement, log_duration and log_statement all together, this will result in wrong counter values. Note that this will also increase drastically the size of your log. log_min_duration_statement should always be preferred. PARALLEL PROCESSING To enable parallel processing you just have to use the -j N option where N is the number of cores you want to use. pgBadger will then proceed as follow: for each log file chunk size = int(file size / N) look at start/end offsets of these chunks fork N processes and seek to the start offset of each chunk each process will terminate when the parser reach the end offset of its chunk each process write stats into a binary temporary file wait for all children processes to terminate All binary temporary files generated will then be read and loaded into memory to build the html output. With that method, at start/end of chunks pgBadger may truncate or omit a maximum of N queries per log file which is an insignificant gap if you have millions of queries in your log file. The chance that the query that you were looking for is lost is near 0, this is why I think this gap is livable. Most of the time the query is counted twice but truncated. When you have many small log files and many CPUs it is speedier to dedicate one core to one log file at a time. To enable this behavior you have to use option -J N instead. With 200 log files of 10MB each the use of the -J option starts being really interesting with 8 Cores. Using this method you will be sure not to lose any queries in the reports. Here is a benchmark done on a server with 8 CPUs and a single file of 9.5GB. Option | 1 CPU | 2 CPU | 4 CPU | 8 CPU --------+---------+-------+-------+------ -j | 1h41m18 | 50m25 | 25m39 | 15m58 -J | 1h41m18 | 54m28 | 41m16 | 34m45 With 200 log files of 10MB each, so 2GB in total, the results are slightly different: Option | 1 CPU | 2 CPU | 4 CPU | 8 CPU --------+-------+-------+-------+------ -j | 20m15 | 9m56 | 5m20 | 4m20 -J | 20m15 | 9m49 | 5m00 | 2m40 So it is recommended to use -j unless you have hundreds of small log files and can use at least 8 CPUs. IMPORTANT: when you are using parallel parsing pgBadger will generate a lot of temporary files in the /tmp directory and will remove them at the end, so do not remove those files unless pgBadger is not running. They are all named with the following template tmp_pgbadgerXXXX.bin so they can be easily identified. INCREMENTAL REPORTS pgBadger includes an automatic incremental report mode using option -I or --incremental. When running in this mode, pgBadger will generate one report per day and a cumulative report per week. Output is first done in binary format into the mandatory output directory (see option -O or --outdir), then in HTML format for daily and weekly reports with a main index file. The main index file will show a dropdown menu per week with a link to each week report and links to daily reports of each week. For example, if you run pgBadger as follows based on a daily rotated file: 0 4 * * * /usr/bin/pgbadger -I -q /var/log/postgresql/postgresql.log.1 -O /var/www/pg_reports/ you will have all daily and weekly reports for the full running period. In this mode pgBadger will create an automatic incremental file in the output directory, so you don't have to use the -l option unless you want to change the path of that file. This means that you can run pgBadger in this mode each day on a log file rotated each week, and it will not count the log entries twice. To save disk space you may want to use the -X or --extra-files command line option to force pgBadger to write JavaScript and CSS to separate files in the output directory. The resources will then be loaded using script and link tags. Rebuilding reports Incremental reports can be rebuilt after a pgbadger report fix or a new feature to update all HTML reports. To rebuild all reports where a binary file is still present proceed as follow: rm /path/to/reports/*.js rm /path/to/reports/*.css pgbadger -X -I -O /path/to/reports/ --rebuild it will also update all resource files (JS and CSS). Use -E or --explode if the reports were built using this option. Monthly reports By default pgBadger in incremental mode only computes daily and weekly reports. If you want monthly cumulative reports you will have to use a separate command to specify the report to build. For example to build a report for August 2019: pgbadger -X --month-report 2919-08 /var/www/pg_reports/ this will add a link to the month name into the calendar view of incremental reports to look at monthly report. The report for a current month can be run every day it is entirely rebuilt each time. The monthly report is not built by default because it could take lot of time following the amount of data. If reports were built with the per database option ( -E | --explode ) it must be used too when calling pgbadger to build monthly report: pgbadger -E -X --month-report 2919-08 /var/www/pg_reports/ This is the same when using the rebuild option ( -R | --rebuild ). BINARY FORMAT Using the binary format it is possible to create custom incremental and cumulative reports. For example, if you want to refresh a pgBadger report each hour from a daily PostgreSQL log file, you can proceed by running each hour the following commands: pgbadger --last-parsed .pgbadger_last_state_file -o sunday/hourX.bin /var/log/pgsql/postgresql-Sun.log to generate the incremental data files in binary format. And to generate the fresh HTML report from that binary file: pgbadger sunday/*.bin Or as another example, if you generate one log file per hour and you want reports to be rebuilt each time the log file is rotated, proceed as follows: pgbadger -o day1/hour01.bin /var/log/pgsql/pglog/postgresql-2012-03-23_10.log pgbadger -o day1/hour02.bin /var/log/pgsql/pglog/postgresql-2012-03-23_11.log pgbadger -o day1/hour03.bin /var/log/pgsql/pglog/postgresql-2012-03-23_12.log ... When you want to refresh the HTML report, for example each time after a new binary file is generated, just do the following: pgbadger -o day1_report.html day1/*.bin Adjust the commands to suit your particular needs. JSON FORMAT JSON format is good for sharing data with other languages, which makes it easy to integrate pgBadger result into other monitoring tools like Cacti or Graphite. AUTHORS pgBadger is an original work from Gilles Darold. The pgBadger logo is an original creation of Damien Cazeils. The pgBadger v4.x design comes from the "Art is code" company. This web site is a work of Gilles Darold. pgBadger is maintained by Gilles Darold and every one who wants to contribute. Many people have contributed to pgBadger, they are all quoted in the Changelog file. LICENSE pgBadger is free software distributed under the PostgreSQL Licence. Copyright (c) 2012-2022, Gilles Darold A modified version of the SQL::Beautify Perl Module is embedded in pgBadger with copyright (C) 2009 by Jonas Kramer and is published under the terms of the Artistic License 2.0. pgbadger-11.7/README.md000066400000000000000000001113371417325540000145030ustar00rootroot00000000000000### TABLE OF CONTENTS - [NAME](#NAME) - [SYNOPSIS](#SYNOPSIS) - [DESCRIPTION](#DESCRIPTION) - [FEATURE](#FEATURE) - [REQUIREMENT](#REQUIREMENT) - [INSTALLATION](#INSTALLATION) - [POSTGRESQL-CONFIGURATION](#POSTGRESQL-CONFIGURATION) - [LOG-STATEMENTS](#LOG-STATEMENTS) - [PARALLEL-PROCESSING](#PARALLEL-PROCESSING) - [INCREMENTAL-REPORTS](#INCREMENTAL-REPORTS) - [BINARY-FORMAT](#BINARY-FORMAT) - [JSON-FORMAT](#JSON-FORMAT) - [AUTHORS](#AUTHORS) - [LICENSE](#LICENSE) ### NAME pgBadger - a fast PostgreSQL log analysis report ### SYNOPSIS Usage: pgbadger \[options\] logfile \[...\] PostgreSQL log analyzer with fully detailed reports and graphs. Arguments: logfile can be a single log file, a list of files, or a shell command returning a list of files. If you want to pass log content from stdin use - as filename. Note that input from stdin will not work with csvlog. Options: -a | --average minutes : number of minutes to build the average graphs of queries and connections. Default 5 minutes. -A | --histo-average min: number of minutes to build the histogram graphs of queries. Default 60 minutes. -b | --begin datetime : start date/time for the data to be parsed in log (either a timestamp or a time) -c | --dbclient host : only report on entries for the given client host. -C | --nocomment : remove comments like /* ... */ from queries. -d | --dbname database : only report on entries for the given database. -D | --dns-resolv : client ip addresses are replaced by their DNS name. Be warned that this can really slow down pgBadger. -e | --end datetime : end date/time for the data to be parsed in log (either a timestamp or a time) -E | --explode : explode the main report by generating one report per database. Global information not related to a database are added to the postgres database report. -f | --format logtype : possible values: syslog, syslog2, stderr, jsonlog, cvs, pgbouncer, logplex, rds and redshift. Use this option when pgBadger is not able to detect the log format. -G | --nograph : disable graphs on HTML output. Enabled by default. -h | --help : show this message and exit. -H | --html-outdir path: path to directory where HTML report must be written in incremental mode, binary files stay on directory defined with -O, --outdir option. -i | --ident name : programname used as syslog ident. Default: postgres -I | --incremental : use incremental mode, reports will be generated by days in a separate directory, --outdir must be set. -j | --jobs number : number of jobs to run at same time. Run as single by default or when working with csvlog. -J | --Jobs number : number of log file to parse in parallel. Process one file at a time by default or when csvlog is used. -l | --last-parsed file: allow incremental log parsing by registering the last datetime and line parsed. Useful if you want to watch errors since last run or if you want one report per day with a log rotated each week. -L | --logfile-list file:file containing a list of log file to parse. -m | --maxlength size : maximum length of a query, it will be restricted to the given size. Default truncate size is 100000. -M | --no-multiline : do not collect multiline statement to avoid garbage especially on errors that generate a huge report. -n | --nohighlight : disable SQL code highlighting. -N | --appname name : only report on entries for given application name -o | --outfile filename: define the filename for the output. Default depends on the output format: out.html, out.txt, out.bin, out.json or out.tsung. This option can be used multiple time to output several format. To use json output the Perl module JSON::XS must be installed, To dump output to stdout use - as filename. -O | --outdir path : directory where out file must be saved. -p | --prefix string : the value of your custom log_line_prefix as defined in your postgresql.conf. Only use it if you aren't using one of the standard prefixes specified in the pgBadger documentation, such as if your prefix includes additional variables like client ip or application name. See examples below. -P | --no-prettify : disable SQL queries prettify formatter. -q | --quiet : don't print anything to stdout, not even a progress bar. -Q | --query-numbering : add numbering of queries to the output when using options --dump-all-queries or --normalized-only. -r | --remote-host ip : set the host where to execute the cat command on remote logfile to parse locally the file. -R | --retention N : number of weeks to keep in incremental mode. Default to 0, disabled. Used to set the number of weeks to keep in output directory. Older weeks and days directory are automatically removed. -s | --sample number : number of query samples to store. Default: 3. -S | --select-only : only report SELECT queries. -t | --top number : number of queries to store/display. Default: 20. -T | --title string : change title of the HTML page report. -u | --dbuser username : only report on entries for the given user. -U | --exclude-user username : exclude entries for the specified user from report. Can be used multiple time. -v | --verbose : enable verbose or debug mode. Disabled by default. -V | --version : show pgBadger version and exit. -w | --watch-mode : only report errors just like logwatch could do. -W | --wide-char : encode html output of queries into UTF8 to avoid Perl message "Wide character in print". -x | --extension : output format. Values: text, html, bin, json or tsung. Default: html -X | --extra-files : in incremental mode allow pgBadger to write CSS and JS files in the output directory as separate files. -z | --zcat exec_path : set the full path to the zcat program. Use it if zcat or bzcat or unzip is not in your path. -Z | --timezone +/-XX : Set the number of hours from GMT of the timezone. Use this to adjust date/time in JavaScript graphs. --pie-limit num : pie data lower than num% will show a sum instead. --exclude-query regex : any query matching the given regex will be excluded from the report. For example: "^(VACUUM|COMMIT)" You can use this option multiple times. --exclude-file filename: path of the file which contains all the regex to use to exclude queries from the report. One regex per line. --include-query regex : any query that does not match the given regex will be excluded from the report. You can use this option multiple times. For example: "(tbl1|tbl2)". --include-file filename: path of the file which contains all the regex of the queries to include from the report. One regex per line. --disable-error : do not generate error report. --disable-hourly : do not generate hourly report. --disable-type : do not generate report of queries by type, database or user. --disable-query : do not generate query reports (slowest, most frequent, queries by users, by database, ...). --disable-session : do not generate session report. --disable-connection : do not generate connection report. --disable-lock : do not generate lock report. --disable-temporary : do not generate temporary report. --disable-checkpoint : do not generate checkpoint/restartpoint report. --disable-autovacuum : do not generate autovacuum report. --charset : used to set the HTML charset to be used. Default: utf-8. --csv-separator : used to set the CSV field separator, default: , --exclude-time regex : any timestamp matching the given regex will be excluded from the report. Example: "2013-04-12 .*" You can use this option multiple times. --include-time regex : only timestamps matching the given regex will be included in the report. Example: "2013-04-12 .*" You can use this option multiple times. --exclude-db name : exclude entries for the specified database from report. Example: "pg_dump". Can be used multiple time. --exclude-appname name : exclude entries for the specified application name from report. Example: "pg_dump". Can be used multiple time. --exclude-line regex : pgBadger will start to exclude any log entry that will match the given regex. Can be used multiple time. --exclude-client name : exclude log entries for the specified client ip. Can be used multiple time. --anonymize : obscure all literals in queries, useful to hide confidential data. --noreport : prevent pgBadger to create reports in incremental mode. --log-duration : force pgBadger to associate log entries generated by both log_duration = on and log_statement = 'all' --enable-checksum : used to add a md5 sum under each query report. --journalctl command : command to use to replace PostgreSQL logfile by a call to journalctl. Basically it might be: journalctl -u postgresql-9.5 --pid-dir path : set the path where the pid file must be stored. Default /tmp --pid-file file : set the name of the pid file to manage concurrent execution of pgBadger. Default: pgbadger.pid --rebuild : used to rebuild all html reports in incremental output directories where there's binary data files. --pgbouncer-only : only show PgBouncer related menu in the header. --start-monday : in incremental mode, calendar's weeks start on a sunday. Use this option to start on a monday. --iso-week-number : in incremental mode, calendar's weeks start on a monday and respect the ISO 8601 week number, range 01 to 53, where week 1 is the first week that has at least 4 days in the new year. --normalized-only : only dump all normalized query to out.txt --log-timezone +/-XX : Set the number of hours from GMT of the timezone that must be used to adjust date/time read from log file before beeing parsed. Using this option make more difficult log search with a date/time. --prettify-json : use it if you want json output to be prettified. --month-report YYYY-MM : create a cumulative HTML report over the specified month. Requires incremental output directories and the presence of all necessary binary data files --day-report YYYY-MM-DD: create an HTML report over the specified day. Requires incremental output directories and the presence of all necessary binary data files --noexplain : do not process lines generated by auto_explain. --command CMD : command to execute to retrieve log entries on stdin. pgBadger will open a pipe to the command and parse log entries generated by the command. --no-week : inform pgbadger to not build weekly reports in incremental mode. Useful if it takes too much time. --explain-url URL : use it to override the url of the graphical explain tool. Default: http://explain.depesz.com/?is_public=0&is_anon=0&plan= --tempdir DIR : set directory where temporary files will be written Default: File::Spec->tmpdir() || '/tmp' --no-process-info : disable changing process title to help identify pgbadger process, some system do not support it. --dump-all-queries : dump all queries found in the log file replacing bind parameters are included in the queries at their respective placeholders position. --keep-comments : do not remove comments from normalized queries. It can be useful if you want to distinguish between same normalized queries. --no-progressbar : disable progressbar. pgBadger is able to parse a remote log file using a passwordless ssh connection. Use the -r or --remote-host to set the host ip address or hostname. There's also some additional options to fully control the ssh connection. --ssh-program ssh path to the ssh program to use. Default: ssh. --ssh-port port ssh port to use for the connection. Default: 22. --ssh-user username connection login name. Default to running user. --ssh-identity file path to the identity file to use. --ssh-timeout second timeout to ssh connection failure. Default 10 secs. --ssh-option options list of -o options to use for the ssh connection. Options always used: -o ConnectTimeout=$ssh_timeout -o PreferredAuthentications=hostbased,publickey Log file to parse can also be specified using an URI, supported protocol are http\[s\] and \[s\]ftp. The curl command will be used to download the file and the file will be parsed during download. The ssh protocol is also supported and will use the ssh command like with the remote host use. See examples bellow. Examples: pgbadger /var/log/postgresql.log pgbadger /var/log/postgres.log.2.gz /var/log/postgres.log.1.gz /var/log/postgres.log pgbadger /var/log/postgresql/postgresql-2012-05-* pgbadger --exclude-query="^(COPY|COMMIT)" /var/log/postgresql.log pgbadger -b "2012-06-25 10:56:11" -e "2012-06-25 10:59:11" /var/log/postgresql.log cat /var/log/postgres.log | pgbadger - # Log prefix with stderr log output pgbadger --prefix '%t [%p]: user=%u,db=%d,client=%h' /pglog/postgresql-2012-08-21* pgbadger --prefix '%m %u@%d %p %r %a : ' /pglog/postgresql.log # Log line prefix with syslog log output pgbadger --prefix 'user=%u,db=%d,client=%h,appname=%a' /pglog/postgresql-2012-08-21* # Use my 8 CPUs to parse my 10GB file faster, much faster pgbadger -j 8 /pglog/postgresql-10.1-main.log Use URI notation for remote log file: pgbadger http://172.12.110.1//var/log/postgresql/postgresql-10.1-main.log pgbadger ftp://username@172.12.110.14/postgresql-10.1-main.log pgbadger ssh://username@172.12.110.14:2222//var/log/postgresql/postgresql-10.1-main.log* You can use together a local PostgreSQL log and a remote pgbouncer log file to parse: pgbadger /var/log/postgresql/postgresql-10.1-main.log ssh://username@172.12.110.14/pgbouncer.log Generate Tsung sessions XML file with select queries only: pgbadger -S -o sessions.tsung --prefix '%t [%p]: user=%u,db=%d ' /pglog/postgresql-10.1.log Reporting errors every week by cron job: 30 23 * * 1 /usr/bin/pgbadger -q -w /var/log/postgresql.log -o /var/reports/pg_errors.html Generate report every week using incremental behavior: 0 4 * * 1 /usr/bin/pgbadger -q `find /var/log/ -mtime -7 -name "postgresql.log*"` -o /var/reports/pg_errors-`date +\%F`.html -l /var/reports/pgbadger_incremental_file.dat This supposes that your log file and HTML report are also rotated every week. Or better, use the auto-generated incremental reports: 0 4 * * * /usr/bin/pgbadger -I -q /var/log/postgresql/postgresql.log.1 -O /var/www/pg_reports/ will generate a report per day and per week. In incremental mode, you can also specify the number of week to keep in the reports: /usr/bin/pgbadger --retention 2 -I -q /var/log/postgresql/postgresql.log.1 -O /var/www/pg_reports/ If you have a pg\_dump at 23:00 and 13:00 each day during half an hour, you can use pgBadger as follow to exclude these period from the report: pgbadger --exclude-time "2013-09-.* (23|13):.*" postgresql.log This will help avoid having COPY statements, as generated by pg\_dump, on top of the list of slowest queries. You can also use --exclude-appname "pg\_dump" to solve this problem in a simpler way. You can also parse journalctl output just as if it was a log file: pgbadger --journalctl 'journalctl -u postgresql-9.5' or worst, call it from a remote host: pgbadger -r 192.168.1.159 --journalctl 'journalctl -u postgresql-9.5' you don't need to specify any log file at command line, but if you have other PostgreSQL log file to parse, you can add them as usual. To rebuild all incremental html reports after, proceed as follow: rm /path/to/reports/*.js rm /path/to/reports/*.css pgbadger -X -I -O /path/to/reports/ --rebuild it will also update all resource files (JS and CSS). Use -E or --explode if the reports were built using this option. pgBadger also support Heroku PostgreSQL logs using logplex format: heroku logs -p postgres | pgbadger -f logplex -o heroku.html - this will stream Heroku PostgreSQL log to pgbadger through stdin. pgBadger can auto detect RDS and cloudwatch PostgreSQL logs using rds format: pgbadger -f rds -o rds_out.html rds.log CloudSQL Postgresql logs it's fairly normal PostgreSQL log but encapsulated in JSON format. It is auto detected too by pgBagder but in case you need to force the log format, use \`jsonlog\` pgbadger -f jsonlog -o cloudsql_out.html cloudsql.log This is the same than with the jsonlog extension, the json format is different but pgBadger can parse both format. To create a cumulative report over a month use command: pgbadger --month-report 2919-05 /path/to/incremantal/reports/ this will add a link to the month name into the calendar view in incremental reports to look at report for month 2019 May. Use -E or --explode if the reports were built using this option. ### DESCRIPTION pgBadger is a PostgreSQL log analyzer built for speed providing fully detailed reports based on your PostgreSQL log files. It's a small standalone Perl script that outperforms any other PostgreSQL log analyzer. It is written in pure Perl and uses a JavaScript library (flotr2) to draw graphs so that you don't need to install any additional Perl modules or other packages. Furthermore, this library gives us more features such as zooming. pgBadger also uses the Bootstrap JavaScript library and the FontAwesome webfont for better design. Everything is embedded. pgBadger is able to autodetect your log file format (syslog, stderr, csvlog or jsonlog) if the file is long enough. It is designed to parse huge log files as well as compressed files. Supported compressed format are gzip, bzip2, lz4, xz, zip and zstd. For the xz format you must have an xz version upper than 5.05 that supports the --robot option. In order pgbadger determine uncompressed file size with lz4, file must be compressed with --content-size option. For the complete list of features see below. All charts are zoomable and can be saved as PNG images. You can also limit pgBadger to only report errors or remove any part of the report using command line options. pgBadger supports any custom format set into the log\_line\_prefix directive of your postgresql.conf file as long as it at least specify the %t and %p patterns. pgBadger allows parallel processing of a single log file or multiple files through the use of the -j option specifying the number of CPUs. If you want to save system performance you can also use log\_duration instead of log\_min\_duration\_statement to have reports on duration and number of queries only. ### FEATURE pgBadger reports everything about your SQL queries: Overall statistics. The most frequent waiting queries. Queries that waited the most. Queries generating the most temporary files. Queries generating the largest temporary files. The slowest queries. Queries that took up the most time. The most frequent queries. The most frequent errors. Histogram of query times. Histogram of sessions times. Users involved in top queries. Applications involved in top queries. Queries generating the most cancellation. Queries most cancelled. The most time consuming prepare/bind queries The following reports are also available with hourly charts divided into periods of five minutes: SQL queries statistics. Temporary file statistics. Checkpoints statistics. Autovacuum and autoanalyze statistics. Cancelled queries. Error events (panic, fatal, error and warning). Error class distribution. There are also some pie charts about distribution of: Locks statistics. Queries by type (select/insert/update/delete). Distribution of queries type per database/application Sessions per database/user/client/application. Connections per database/user/client/application. Autovacuum and autoanalyze per table. Queries per user and total duration per user. All charts are zoomable and can be saved as PNG images. SQL queries reported are highlighted and beautified automatically. pgBadger is also able to parse PgBouncer log files and to create the following reports: Request Throughput Bytes I/O Throughput Queries Average duration Simultaneous sessions Histogram of sessions times Sessions per database Sessions per user Sessions per host Established connections Connections per database Connections per user Connections per host Most used reserved pools Most Frequent Errors/Events You can also have incremental reports with one report per day and a cumulative report per week. Two multiprocess modes are available to speed up log parsing, one using one core per log file, and the second using multiple cores to parse a single file. These modes can be combined. Histogram granularity can be adjusted using the -A command line option. By default they will report the mean of each top queries/errors occurring per hour, but you can specify the granularity down to the minute. pgBadger can also be used in a central place to parse remote log files using a passwordless SSH connection. This mode can be used with compressed files and in the multiprocess per file mode (-J) but can not be used with the CSV log format. ### REQUIREMENT pgBadger comes as a single Perl script - you do not need anything other than a modern Perl distribution. Charts are rendered using a JavaScript library so you don't need anything other than a web browser. Your browser will do all the work. If you planned to parse PostgreSQL CSV log files you might need some Perl Modules: Text::CSV_XS - to parse PostgreSQL CSV log files. This module is optional, if you don't have PostgreSQL log in the CSV format you don't need to install it. If you want to export statistics as JSON file you need an additional Perl module: JSON::XS - JSON serialising/deserialising, done correctly and fast This module is optional, if you don't select the json output format you don't need to install it. You can install it on a Debian like system using: sudo apt-get install libjson-xs-perl and in RPM like system using: sudo yum install perl-JSON-XS Compressed log file format is autodetected from the file extension. If pgBadger find a gz extension it will use the zcat utility, with a bz2 extension it will use bzcat, with lz4 it will use lz4cat, with zst it will use zstdcat and if the file extension is zip or xz then the unzip or xz utilities will be used. If those utilities are not found in the PATH environment variable then use the --zcat command line option to change this path. For example: --zcat="/usr/local/bin/gunzip -c" or --zcat="/usr/local/bin/bzip2 -dc" --zcat="C:\tools\unzip -p" By default pgBadger will use the zcat, bzcat, lz4cat, zstdcat and unzip utilities following the file extension. If you use the default autodetection compress format you can mixed gz, bz2, lz4, xz, zip or zstd files. Specifying a custom value to \--zcat option will remove this feature of mixed compressed format. Note that multiprocessing can not be used with compressed files or CSV files as well as under Windows platform. ### INSTALLATION Download the tarball from GitHub and unpack the archive as follow: tar xzf pgbadger-11.x.tar.gz cd pgbadger-11.x/ perl Makefile.PL make && sudo make install This will copy the Perl script pgbadger to /usr/local/bin/pgbadger by default and the man page into /usr/local/share/man/man1/pgbadger.1. Those are the default installation directories for 'site' install. If you want to install all under /usr/ location, use INSTALLDIRS='perl' as an argument of Makefile.PL. The script will be installed into /usr/bin/pgbadger and the manpage into /usr/share/man/man1/pgbadger.1. For example, to install everything just like Debian does, proceed as follows: perl Makefile.PL INSTALLDIRS=vendor By default INSTALLDIRS is set to site. ### POSTGRESQL CONFIGURATION You must enable and set some configuration directives in your postgresql.conf before starting. You must first enable SQL query logging to have something to parse: log_min_duration_statement = 0 Here every statement will be logged, on a busy server you may want to increase this value to only log queries with a longer duration. Note that if you have log\_statement set to 'all' nothing will be logged through the log\_min\_duration\_statement directive. See the next chapter for more information. pgBadger supports any custom format set into the log\_line\_prefix directive of your postgresql.conf file as long as it at least specify a time escape sequence (%t, %m or %n) and the process related escape sequence (%p or %c). For example, with 'stderr' log format, log\_line\_prefix must be at least: log_line_prefix = '%t [%p]: ' Log line prefix could add user, database name, application name and client ip address as follows: log_line_prefix = '%t [%p]: user=%u,db=%d,app=%a,client=%h ' or for syslog log file format: log_line_prefix = 'user=%u,db=%d,app=%a,client=%h ' Log line prefix for stderr output could also be: log_line_prefix = '%t [%p]: db=%d,user=%u,app=%a,client=%h ' or for syslog output: log_line_prefix = 'db=%d,user=%u,app=%a,client=%h ' You need to enable other parameters in postgresql.conf to get more information from your log files: log_checkpoints = on log_connections = on log_disconnections = on log_lock_waits = on log_temp_files = 0 log_autovacuum_min_duration = 0 log_error_verbosity = default Do not enable log\_statement as its log format will not be parsed by pgBadger. Of course your log messages should be in English with or without locale support: lc_messages='en_US.UTF-8' lc_messages='C' pgBadger parser do not support other locale like 'fr\_FR.UTF-8' for example. ### LOG STATEMENTS Considerations about log\_min\_duration\_statement, log\_duration and log\_statement configuration directives. If you want the query statistics to include the actual query strings, you must set log\_min\_duration\_statement to 0 or more milliseconds. If you just want to report duration and number of queries and don't want all details about queries, set log\_min\_duration\_statement to -1 to disable it and enable log\_duration in your postgresql.conf file. If you want to add the most common request report you can either choose to set log\_min\_duration\_statement to a higher value or choose to enable log\_statement. Enabling log\_min\_duration\_statement will add reports about slowest queries and queries that took up the most time. Take care that if you have log\_statement set to 'all' nothing will be logged with log\_min\_duration\_statement. Warning: Do not enable both log\_min\_duration\_statement, log\_duration and log\_statement all together, this will result in wrong counter values. Note that this will also increase drastically the size of your log. log\_min\_duration\_statement should always be preferred. ### PARALLEL PROCESSING To enable parallel processing you just have to use the -j N option where N is the number of cores you want to use. pgBadger will then proceed as follow: for each log file chunk size = int(file size / N) look at start/end offsets of these chunks fork N processes and seek to the start offset of each chunk each process will terminate when the parser reach the end offset of its chunk each process write stats into a binary temporary file wait for all children processes to terminate All binary temporary files generated will then be read and loaded into memory to build the html output. With that method, at start/end of chunks pgBadger may truncate or omit a maximum of N queries per log file which is an insignificant gap if you have millions of queries in your log file. The chance that the query that you were looking for is lost is near 0, this is why I think this gap is livable. Most of the time the query is counted twice but truncated. When you have many small log files and many CPUs it is speedier to dedicate one core to one log file at a time. To enable this behavior you have to use option -J N instead. With 200 log files of 10MB each the use of the -J option starts being really interesting with 8 Cores. Using this method you will be sure not to lose any queries in the reports. Here is a benchmark done on a server with 8 CPUs and a single file of 9.5GB. Option | 1 CPU | 2 CPU | 4 CPU | 8 CPU --------+---------+-------+-------+------ -j | 1h41m18 | 50m25 | 25m39 | 15m58 -J | 1h41m18 | 54m28 | 41m16 | 34m45 With 200 log files of 10MB each, so 2GB in total, the results are slightly different: Option | 1 CPU | 2 CPU | 4 CPU | 8 CPU --------+-------+-------+-------+------ -j | 20m15 | 9m56 | 5m20 | 4m20 -J | 20m15 | 9m49 | 5m00 | 2m40 So it is recommended to use -j unless you have hundreds of small log files and can use at least 8 CPUs. IMPORTANT: when you are using parallel parsing pgBadger will generate a lot of temporary files in the /tmp directory and will remove them at the end, so do not remove those files unless pgBadger is not running. They are all named with the following template tmp\_pgbadgerXXXX.bin so they can be easily identified. ### INCREMENTAL REPORTS pgBadger includes an automatic incremental report mode using option -I or \--incremental. When running in this mode, pgBadger will generate one report per day and a cumulative report per week. Output is first done in binary format into the mandatory output directory (see option -O or --outdir), then in HTML format for daily and weekly reports with a main index file. The main index file will show a dropdown menu per week with a link to each week report and links to daily reports of each week. For example, if you run pgBadger as follows based on a daily rotated file: 0 4 * * * /usr/bin/pgbadger -I -q /var/log/postgresql/postgresql.log.1 -O /var/www/pg_reports/ you will have all daily and weekly reports for the full running period. In this mode pgBadger will create an automatic incremental file in the output directory, so you don't have to use the -l option unless you want to change the path of that file. This means that you can run pgBadger in this mode each day on a log file rotated each week, and it will not count the log entries twice. To save disk space you may want to use the -X or --extra-files command line option to force pgBadger to write JavaScript and CSS to separate files in the output directory. The resources will then be loaded using script and link tags. #### Rebuilding reports Incremental reports can be rebuilt after a pgbadger report fix or a new feature to update all HTML reports. To rebuild all reports where a binary file is still present proceed as follow: rm /path/to/reports/*.js rm /path/to/reports/*.css pgbadger -X -I -O /path/to/reports/ --rebuild it will also update all resource files (JS and CSS). Use -E or --explode if the reports were built using this option. #### Monthly reports By default pgBadger in incremental mode only computes daily and weekly reports. If you want monthly cumulative reports you will have to use a separate command to specify the report to build. For example to build a report for August 2019: pgbadger -X --month-report 2919-08 /var/www/pg_reports/ this will add a link to the month name into the calendar view of incremental reports to look at monthly report. The report for a current month can be run every day it is entirely rebuilt each time. The monthly report is not built by default because it could take lot of time following the amount of data. If reports were built with the per database option ( -E | --explode ) it must be used too when calling pgbadger to build monthly report: pgbadger -E -X --month-report 2919-08 /var/www/pg_reports/ This is the same when using the rebuild option ( -R | --rebuild ). ### BINARY FORMAT Using the binary format it is possible to create custom incremental and cumulative reports. For example, if you want to refresh a pgBadger report each hour from a daily PostgreSQL log file, you can proceed by running each hour the following commands: pgbadger --last-parsed .pgbadger_last_state_file -o sunday/hourX.bin /var/log/pgsql/postgresql-Sun.log to generate the incremental data files in binary format. And to generate the fresh HTML report from that binary file: pgbadger sunday/*.bin Or as another example, if you generate one log file per hour and you want reports to be rebuilt each time the log file is rotated, proceed as follows: pgbadger -o day1/hour01.bin /var/log/pgsql/pglog/postgresql-2012-03-23_10.log pgbadger -o day1/hour02.bin /var/log/pgsql/pglog/postgresql-2012-03-23_11.log pgbadger -o day1/hour03.bin /var/log/pgsql/pglog/postgresql-2012-03-23_12.log ... When you want to refresh the HTML report, for example each time after a new binary file is generated, just do the following: pgbadger -o day1_report.html day1/*.bin Adjust the commands to suit your particular needs. ### JSON FORMAT JSON format is good for sharing data with other languages, which makes it easy to integrate pgBadger result into other monitoring tools like Cacti or Graphite. ### AUTHORS pgBadger is an original work from Gilles Darold. The pgBadger logo is an original creation of Damien Cazeils. The pgBadger v4.x design comes from the "Art is code" company. This web site is a work of Gilles Darold. pgBadger is maintained by Gilles Darold and every one who wants to contribute. Many people have contributed to pgBadger, they are all quoted in the Changelog file. ### LICENSE pgBadger is free software distributed under the PostgreSQL Licence. Copyright (c) 2012-2022, Gilles Darold A modified version of the SQL::Beautify Perl Module is embedded in pgBadger with copyright (C) 2009 by Jonas Kramer and is published under the terms of the Artistic License 2.0. pgbadger-11.7/doc/000077500000000000000000000000001417325540000137635ustar00rootroot00000000000000pgbadger-11.7/doc/pgBadger.pod000066400000000000000000001041251417325540000162050ustar00rootroot00000000000000=head1 NAME pgBadger - a fast PostgreSQL log analysis report =head1 SYNOPSIS Usage: pgbadger [options] logfile [...] PostgreSQL log analyzer with fully detailed reports and graphs. Arguments: logfile can be a single log file, a list of files, or a shell command returning a list of files. If you want to pass log content from stdin use - as filename. Note that input from stdin will not work with csvlog. Options: -a | --average minutes : number of minutes to build the average graphs of queries and connections. Default 5 minutes. -A | --histo-average min: number of minutes to build the histogram graphs of queries. Default 60 minutes. -b | --begin datetime : start date/time for the data to be parsed in log (either a timestamp or a time) -c | --dbclient host : only report on entries for the given client host. -C | --nocomment : remove comments like /* ... */ from queries. -d | --dbname database : only report on entries for the given database. -D | --dns-resolv : client ip addresses are replaced by their DNS name. Be warned that this can really slow down pgBadger. -e | --end datetime : end date/time for the data to be parsed in log (either a timestamp or a time) -E | --explode : explode the main report by generating one report per database. Global information not related to a database are added to the postgres database report. -f | --format logtype : possible values: syslog, syslog2, stderr, jsonlog, cvs, pgbouncer, logplex, rds and redshift. Use this option when pgBadger is not able to detect the log format. -G | --nograph : disable graphs on HTML output. Enabled by default. -h | --help : show this message and exit. -H | --html-outdir path: path to directory where HTML report must be written in incremental mode, binary files stay on directory defined with -O, --outdir option. -i | --ident name : programname used as syslog ident. Default: postgres -I | --incremental : use incremental mode, reports will be generated by days in a separate directory, --outdir must be set. -j | --jobs number : number of jobs to run at same time. Run as single by default or when working with csvlog. -J | --Jobs number : number of log file to parse in parallel. Process one file at a time by default or when csvlog is used. -l | --last-parsed file: allow incremental log parsing by registering the last datetime and line parsed. Useful if you want to watch errors since last run or if you want one report per day with a log rotated each week. -L | --logfile-list file:file containing a list of log file to parse. -m | --maxlength size : maximum length of a query, it will be restricted to the given size. Default truncate size is 100000. -M | --no-multiline : do not collect multiline statement to avoid garbage especially on errors that generate a huge report. -n | --nohighlight : disable SQL code highlighting. -N | --appname name : only report on entries for given application name -o | --outfile filename: define the filename for the output. Default depends on the output format: out.html, out.txt, out.bin, out.json or out.tsung. This option can be used multiple time to output several format. To use json output the Perl module JSON::XS must be installed, To dump output to stdout use - as filename. -O | --outdir path : directory where out file must be saved. -p | --prefix string : the value of your custom log_line_prefix as defined in your postgresql.conf. Only use it if you aren't using one of the standard prefixes specified in the pgBadger documentation, such as if your prefix includes additional variables like client ip or application name. See examples below. -P | --no-prettify : disable SQL queries prettify formatter. -q | --quiet : don't print anything to stdout, not even a progress bar. -Q | --query-numbering : add numbering of queries to the output when using options --dump-all-queries or --normalized-only. -r | --remote-host ip : set the host where to execute the cat command on remote logfile to parse locally the file. -R | --retention N : number of weeks to keep in incremental mode. Default to 0, disabled. Used to set the number of weeks to keep in output directory. Older weeks and days directory are automatically removed. -s | --sample number : number of query samples to store. Default: 3. -S | --select-only : only report SELECT queries. -t | --top number : number of queries to store/display. Default: 20. -T | --title string : change title of the HTML page report. -u | --dbuser username : only report on entries for the given user. -U | --exclude-user username : exclude entries for the specified user from report. Can be used multiple time. -v | --verbose : enable verbose or debug mode. Disabled by default. -V | --version : show pgBadger version and exit. -w | --watch-mode : only report errors just like logwatch could do. -W | --wide-char : encode html output of queries into UTF8 to avoid Perl message "Wide character in print". -x | --extension : output format. Values: text, html, bin, json or tsung. Default: html -X | --extra-files : in incremental mode allow pgBadger to write CSS and JS files in the output directory as separate files. -z | --zcat exec_path : set the full path to the zcat program. Use it if zcat or bzcat or unzip is not in your path. -Z | --timezone +/-XX : Set the number of hours from GMT of the timezone. Use this to adjust date/time in JavaScript graphs. --pie-limit num : pie data lower than num% will show a sum instead. --exclude-query regex : any query matching the given regex will be excluded from the report. For example: "^(VACUUM|COMMIT)" You can use this option multiple times. --exclude-file filename: path of the file which contains all the regex to use to exclude queries from the report. One regex per line. --include-query regex : any query that does not match the given regex will be excluded from the report. You can use this option multiple times. For example: "(tbl1|tbl2)". --include-file filename: path of the file which contains all the regex of the queries to include from the report. One regex per line. --disable-error : do not generate error report. --disable-hourly : do not generate hourly report. --disable-type : do not generate report of queries by type, database or user. --disable-query : do not generate query reports (slowest, most frequent, queries by users, by database, ...). --disable-session : do not generate session report. --disable-connection : do not generate connection report. --disable-lock : do not generate lock report. --disable-temporary : do not generate temporary report. --disable-checkpoint : do not generate checkpoint/restartpoint report. --disable-autovacuum : do not generate autovacuum report. --charset : used to set the HTML charset to be used. Default: utf-8. --csv-separator : used to set the CSV field separator, default: , --exclude-time regex : any timestamp matching the given regex will be excluded from the report. Example: "2013-04-12 .*" You can use this option multiple times. --include-time regex : only timestamps matching the given regex will be included in the report. Example: "2013-04-12 .*" You can use this option multiple times. --exclude-db name : exclude entries for the specified database from report. Example: "pg_dump". Can be used multiple time. --exclude-appname name : exclude entries for the specified application name from report. Example: "pg_dump". Can be used multiple time. --exclude-line regex : pgBadger will start to exclude any log entry that will match the given regex. Can be used multiple time. --exclude-client name : exclude log entries for the specified client ip. Can be used multiple time. --anonymize : obscure all literals in queries, useful to hide confidential data. --noreport : prevent pgBadger to create reports in incremental mode. --log-duration : force pgBadger to associate log entries generated by both log_duration = on and log_statement = 'all' --enable-checksum : used to add a md5 sum under each query report. --journalctl command : command to use to replace PostgreSQL logfile by a call to journalctl. Basically it might be: journalctl -u postgresql-9.5 --pid-dir path : set the path where the pid file must be stored. Default /tmp --pid-file file : set the name of the pid file to manage concurrent execution of pgBadger. Default: pgbadger.pid --rebuild : used to rebuild all html reports in incremental output directories where there's binary data files. --pgbouncer-only : only show PgBouncer related menu in the header. --start-monday : in incremental mode, calendar's weeks start on a sunday. Use this option to start on a monday. --iso-week-number : in incremental mode, calendar's weeks start on a monday and respect the ISO 8601 week number, range 01 to 53, where week 1 is the first week that has at least 4 days in the new year. --normalized-only : only dump all normalized query to out.txt --log-timezone +/-XX : Set the number of hours from GMT of the timezone that must be used to adjust date/time read from log file before beeing parsed. Using this option make more difficult log search with a date/time. --prettify-json : use it if you want json output to be prettified. --month-report YYYY-MM : create a cumulative HTML report over the specified month. Requires incremental output directories and the presence of all necessary binary data files --day-report YYYY-MM-DD: create an HTML report over the specified day. Requires incremental output directories and the presence of all necessary binary data files --noexplain : do not process lines generated by auto_explain. --command CMD : command to execute to retrieve log entries on stdin. pgBadger will open a pipe to the command and parse log entries generated by the command. --no-week : inform pgbadger to not build weekly reports in incremental mode. Useful if it takes too much time. --explain-url URL : use it to override the url of the graphical explain tool. Default: http://explain.depesz.com/?is_public=0&is_anon=0&plan= --tempdir DIR : set directory where temporary files will be written Default: File::Spec->tmpdir() || '/tmp' --no-process-info : disable changing process title to help identify pgbadger process, some system do not support it. --dump-all-queries : dump all queries found in the log file replacing bind parameters are included in the queries at their respective placeholders position. --keep-comments : do not remove comments from normalized queries. It can be useful if you want to distinguish between same normalized queries. --no-progressbar : disable progressbar. pgBadger is able to parse a remote log file using a passwordless ssh connection. Use the -r or --remote-host to set the host ip address or hostname. There's also some additional options to fully control the ssh connection. --ssh-program ssh path to the ssh program to use. Default: ssh. --ssh-port port ssh port to use for the connection. Default: 22. --ssh-user username connection login name. Default to running user. --ssh-identity file path to the identity file to use. --ssh-timeout second timeout to ssh connection failure. Default 10 secs. --ssh-option options list of -o options to use for the ssh connection. Options always used: -o ConnectTimeout=$ssh_timeout -o PreferredAuthentications=hostbased,publickey Log file to parse can also be specified using an URI, supported protocol are http[s] and [s]ftp. The curl command will be used to download the file and the file will be parsed during download. The ssh protocol is also supported and will use the ssh command like with the remote host use. See examples bellow. Examples: pgbadger /var/log/postgresql.log pgbadger /var/log/postgres.log.2.gz /var/log/postgres.log.1.gz /var/log/postgres.log pgbadger /var/log/postgresql/postgresql-2012-05-* pgbadger --exclude-query="^(COPY|COMMIT)" /var/log/postgresql.log pgbadger -b "2012-06-25 10:56:11" -e "2012-06-25 10:59:11" /var/log/postgresql.log cat /var/log/postgres.log | pgbadger - # Log prefix with stderr log output pgbadger --prefix '%t [%p]: user=%u,db=%d,client=%h' /pglog/postgresql-2012-08-21* pgbadger --prefix '%m %u@%d %p %r %a : ' /pglog/postgresql.log # Log line prefix with syslog log output pgbadger --prefix 'user=%u,db=%d,client=%h,appname=%a' /pglog/postgresql-2012-08-21* # Use my 8 CPUs to parse my 10GB file faster, much faster pgbadger -j 8 /pglog/postgresql-10.1-main.log Use URI notation for remote log file: pgbadger http://172.12.110.1//var/log/postgresql/postgresql-10.1-main.log pgbadger ftp://username@172.12.110.14/postgresql-10.1-main.log pgbadger ssh://username@172.12.110.14:2222//var/log/postgresql/postgresql-10.1-main.log* You can use together a local PostgreSQL log and a remote pgbouncer log file to parse: pgbadger /var/log/postgresql/postgresql-10.1-main.log ssh://username@172.12.110.14/pgbouncer.log Generate Tsung sessions XML file with select queries only: pgbadger -S -o sessions.tsung --prefix '%t [%p]: user=%u,db=%d ' /pglog/postgresql-10.1.log Reporting errors every week by cron job: 30 23 * * 1 /usr/bin/pgbadger -q -w /var/log/postgresql.log -o /var/reports/pg_errors.html Generate report every week using incremental behavior: 0 4 * * 1 /usr/bin/pgbadger -q `find /var/log/ -mtime -7 -name "postgresql.log*"` -o /var/reports/pg_errors-`date +\%F`.html -l /var/reports/pgbadger_incremental_file.dat This supposes that your log file and HTML report are also rotated every week. Or better, use the auto-generated incremental reports: 0 4 * * * /usr/bin/pgbadger -I -q /var/log/postgresql/postgresql.log.1 -O /var/www/pg_reports/ will generate a report per day and per week. In incremental mode, you can also specify the number of week to keep in the reports: /usr/bin/pgbadger --retention 2 -I -q /var/log/postgresql/postgresql.log.1 -O /var/www/pg_reports/ If you have a pg_dump at 23:00 and 13:00 each day during half an hour, you can use pgBadger as follow to exclude these period from the report: pgbadger --exclude-time "2013-09-.* (23|13):.*" postgresql.log This will help avoid having COPY statements, as generated by pg_dump, on top of the list of slowest queries. You can also use --exclude-appname "pg_dump" to solve this problem in a simpler way. You can also parse journalctl output just as if it was a log file: pgbadger --journalctl 'journalctl -u postgresql-9.5' or worst, call it from a remote host: pgbadger -r 192.168.1.159 --journalctl 'journalctl -u postgresql-9.5' you don't need to specify any log file at command line, but if you have other PostgreSQL log file to parse, you can add them as usual. To rebuild all incremental html reports after, proceed as follow: rm /path/to/reports/*.js rm /path/to/reports/*.css pgbadger -X -I -O /path/to/reports/ --rebuild it will also update all resource files (JS and CSS). Use -E or --explode if the reports were built using this option. pgBadger also support Heroku PostgreSQL logs using logplex format: heroku logs -p postgres | pgbadger -f logplex -o heroku.html - this will stream Heroku PostgreSQL log to pgbadger through stdin. pgBadger can auto detect RDS and cloudwatch PostgreSQL logs using rds format: pgbadger -f rds -o rds_out.html rds.log CloudSQL Postgresql logs it's fairly normal PostgreSQL log but encapsulated in JSON format. It is auto detected too by pgBagder but in case you need to force the log format, use `jsonlog` pgbadger -f jsonlog -o cloudsql_out.html cloudsql.log This is the same than with the jsonlog extension, the json format is different but pgBadger can parse both format. To create a cumulative report over a month use command: pgbadger --month-report 2919-05 /path/to/incremantal/reports/ this will add a link to the month name into the calendar view in incremental reports to look at report for month 2019 May. Use -E or --explode if the reports were built using this option. =head1 DESCRIPTION pgBadger is a PostgreSQL log analyzer built for speed providing fully detailed reports based on your PostgreSQL log files. It's a small standalone Perl script that outperforms any other PostgreSQL log analyzer. It is written in pure Perl and uses a JavaScript library (flotr2) to draw graphs so that you don't need to install any additional Perl modules or other packages. Furthermore, this library gives us more features such as zooming. pgBadger also uses the Bootstrap JavaScript library and the FontAwesome webfont for better design. Everything is embedded. pgBadger is able to autodetect your log file format (syslog, stderr, csvlog or jsonlog) if the file is long enough. It is designed to parse huge log files as well as compressed files. Supported compressed format are gzip, bzip2, lz4, xz, zip and zstd. For the xz format you must have an xz version upper than 5.05 that supports the --robot option. In order pgbadger determine uncompressed file size with lz4, file must be compressed with --content-size option. For the complete list of features see below. All charts are zoomable and can be saved as PNG images. You can also limit pgBadger to only report errors or remove any part of the report using command line options. pgBadger supports any custom format set into the log_line_prefix directive of your postgresql.conf file as long as it at least specify the %t and %p patterns. pgBadger allows parallel processing of a single log file or multiple files through the use of the -j option specifying the number of CPUs. If you want to save system performance you can also use log_duration instead of log_min_duration_statement to have reports on duration and number of queries only. =head1 FEATURE pgBadger reports everything about your SQL queries: Overall statistics. The most frequent waiting queries. Queries that waited the most. Queries generating the most temporary files. Queries generating the largest temporary files. The slowest queries. Queries that took up the most time. The most frequent queries. The most frequent errors. Histogram of query times. Histogram of sessions times. Users involved in top queries. Applications involved in top queries. Queries generating the most cancellation. Queries most cancelled. The most time consuming prepare/bind queries The following reports are also available with hourly charts divided into periods of five minutes: SQL queries statistics. Temporary file statistics. Checkpoints statistics. Autovacuum and autoanalyze statistics. Cancelled queries. Error events (panic, fatal, error and warning). Error class distribution. There are also some pie charts about distribution of: Locks statistics. Queries by type (select/insert/update/delete). Distribution of queries type per database/application Sessions per database/user/client/application. Connections per database/user/client/application. Autovacuum and autoanalyze per table. Queries per user and total duration per user. All charts are zoomable and can be saved as PNG images. SQL queries reported are highlighted and beautified automatically. pgBadger is also able to parse PgBouncer log files and to create the following reports: Request Throughput Bytes I/O Throughput Queries Average duration Simultaneous sessions Histogram of sessions times Sessions per database Sessions per user Sessions per host Established connections Connections per database Connections per user Connections per host Most used reserved pools Most Frequent Errors/Events You can also have incremental reports with one report per day and a cumulative report per week. Two multiprocess modes are available to speed up log parsing, one using one core per log file, and the second using multiple cores to parse a single file. These modes can be combined. Histogram granularity can be adjusted using the -A command line option. By default they will report the mean of each top queries/errors occurring per hour, but you can specify the granularity down to the minute. pgBadger can also be used in a central place to parse remote log files using a passwordless SSH connection. This mode can be used with compressed files and in the multiprocess per file mode (-J) but can not be used with the CSV log format. =head1 REQUIREMENT pgBadger comes as a single Perl script - you do not need anything other than a modern Perl distribution. Charts are rendered using a JavaScript library so you don't need anything other than a web browser. Your browser will do all the work. If you planned to parse PostgreSQL CSV log files you might need some Perl Modules: Text::CSV_XS - to parse PostgreSQL CSV log files. This module is optional, if you don't have PostgreSQL log in the CSV format you don't need to install it. If you want to export statistics as JSON file you need an additional Perl module: JSON::XS - JSON serialising/deserialising, done correctly and fast This module is optional, if you don't select the json output format you don't need to install it. You can install it on a Debian like system using: sudo apt-get install libjson-xs-perl and in RPM like system using: sudo yum install perl-JSON-XS Compressed log file format is autodetected from the file extension. If pgBadger find a gz extension it will use the zcat utility, with a bz2 extension it will use bzcat, with lz4 it will use lz4cat, with zst it will use zstdcat and if the file extension is zip or xz then the unzip or xz utilities will be used. If those utilities are not found in the PATH environment variable then use the --zcat command line option to change this path. For example: --zcat="/usr/local/bin/gunzip -c" or --zcat="/usr/local/bin/bzip2 -dc" --zcat="C:\tools\unzip -p" By default pgBadger will use the zcat, bzcat, lz4cat, zstdcat and unzip utilities following the file extension. If you use the default autodetection compress format you can mixed gz, bz2, lz4, xz, zip or zstd files. Specifying a custom value to --zcat option will remove this feature of mixed compressed format. Note that multiprocessing can not be used with compressed files or CSV files as well as under Windows platform. =head1 INSTALLATION Download the tarball from GitHub and unpack the archive as follow: tar xzf pgbadger-11.x.tar.gz cd pgbadger-11.x/ perl Makefile.PL make && sudo make install This will copy the Perl script pgbadger to /usr/local/bin/pgbadger by default and the man page into /usr/local/share/man/man1/pgbadger.1. Those are the default installation directories for 'site' install. If you want to install all under /usr/ location, use INSTALLDIRS='perl' as an argument of Makefile.PL. The script will be installed into /usr/bin/pgbadger and the manpage into /usr/share/man/man1/pgbadger.1. For example, to install everything just like Debian does, proceed as follows: perl Makefile.PL INSTALLDIRS=vendor By default INSTALLDIRS is set to site. =head1 POSTGRESQL CONFIGURATION You must enable and set some configuration directives in your postgresql.conf before starting. You must first enable SQL query logging to have something to parse: log_min_duration_statement = 0 Here every statement will be logged, on a busy server you may want to increase this value to only log queries with a longer duration. Note that if you have log_statement set to 'all' nothing will be logged through the log_min_duration_statement directive. See the next chapter for more information. pgBadger supports any custom format set into the log_line_prefix directive of your postgresql.conf file as long as it at least specify a time escape sequence (%t, %m or %n) and the process related escape sequence (%p or %c). For example, with 'stderr' log format, log_line_prefix must be at least: log_line_prefix = '%t [%p]: ' Log line prefix could add user, database name, application name and client ip address as follows: log_line_prefix = '%t [%p]: user=%u,db=%d,app=%a,client=%h ' or for syslog log file format: log_line_prefix = 'user=%u,db=%d,app=%a,client=%h ' Log line prefix for stderr output could also be: log_line_prefix = '%t [%p]: db=%d,user=%u,app=%a,client=%h ' or for syslog output: log_line_prefix = 'db=%d,user=%u,app=%a,client=%h ' You need to enable other parameters in postgresql.conf to get more information from your log files: log_checkpoints = on log_connections = on log_disconnections = on log_lock_waits = on log_temp_files = 0 log_autovacuum_min_duration = 0 log_error_verbosity = default Do not enable log_statement as its log format will not be parsed by pgBadger. Of course your log messages should be in English with or without locale support: lc_messages='en_US.UTF-8' lc_messages='C' pgBadger parser do not support other locale like 'fr_FR.UTF-8' for example. =head1 LOG STATEMENTS Considerations about log_min_duration_statement, log_duration and log_statement configuration directives. If you want the query statistics to include the actual query strings, you must set log_min_duration_statement to 0 or more milliseconds. If you just want to report duration and number of queries and don't want all details about queries, set log_min_duration_statement to -1 to disable it and enable log_duration in your postgresql.conf file. If you want to add the most common request report you can either choose to set log_min_duration_statement to a higher value or choose to enable log_statement. Enabling log_min_duration_statement will add reports about slowest queries and queries that took up the most time. Take care that if you have log_statement set to 'all' nothing will be logged with log_min_duration_statement. Warning: Do not enable both log_min_duration_statement, log_duration and log_statement all together, this will result in wrong counter values. Note that this will also increase drastically the size of your log. log_min_duration_statement should always be preferred. =head1 PARALLEL PROCESSING To enable parallel processing you just have to use the -j N option where N is the number of cores you want to use. pgBadger will then proceed as follow: for each log file chunk size = int(file size / N) look at start/end offsets of these chunks fork N processes and seek to the start offset of each chunk each process will terminate when the parser reach the end offset of its chunk each process write stats into a binary temporary file wait for all children processes to terminate All binary temporary files generated will then be read and loaded into memory to build the html output. With that method, at start/end of chunks pgBadger may truncate or omit a maximum of N queries per log file which is an insignificant gap if you have millions of queries in your log file. The chance that the query that you were looking for is lost is near 0, this is why I think this gap is livable. Most of the time the query is counted twice but truncated. When you have many small log files and many CPUs it is speedier to dedicate one core to one log file at a time. To enable this behavior you have to use option -J N instead. With 200 log files of 10MB each the use of the -J option starts being really interesting with 8 Cores. Using this method you will be sure not to lose any queries in the reports. Here is a benchmark done on a server with 8 CPUs and a single file of 9.5GB. Option | 1 CPU | 2 CPU | 4 CPU | 8 CPU --------+---------+-------+-------+------ -j | 1h41m18 | 50m25 | 25m39 | 15m58 -J | 1h41m18 | 54m28 | 41m16 | 34m45 With 200 log files of 10MB each, so 2GB in total, the results are slightly different: Option | 1 CPU | 2 CPU | 4 CPU | 8 CPU --------+-------+-------+-------+------ -j | 20m15 | 9m56 | 5m20 | 4m20 -J | 20m15 | 9m49 | 5m00 | 2m40 So it is recommended to use -j unless you have hundreds of small log files and can use at least 8 CPUs. IMPORTANT: when you are using parallel parsing pgBadger will generate a lot of temporary files in the /tmp directory and will remove them at the end, so do not remove those files unless pgBadger is not running. They are all named with the following template tmp_pgbadgerXXXX.bin so they can be easily identified. =head1 INCREMENTAL REPORTS pgBadger includes an automatic incremental report mode using option -I or --incremental. When running in this mode, pgBadger will generate one report per day and a cumulative report per week. Output is first done in binary format into the mandatory output directory (see option -O or --outdir), then in HTML format for daily and weekly reports with a main index file. The main index file will show a dropdown menu per week with a link to each week report and links to daily reports of each week. For example, if you run pgBadger as follows based on a daily rotated file: 0 4 * * * /usr/bin/pgbadger -I -q /var/log/postgresql/postgresql.log.1 -O /var/www/pg_reports/ you will have all daily and weekly reports for the full running period. In this mode pgBadger will create an automatic incremental file in the output directory, so you don't have to use the -l option unless you want to change the path of that file. This means that you can run pgBadger in this mode each day on a log file rotated each week, and it will not count the log entries twice. To save disk space you may want to use the -X or --extra-files command line option to force pgBadger to write JavaScript and CSS to separate files in the output directory. The resources will then be loaded using script and link tags. =head2 Rebuilding reports Incremental reports can be rebuilt after a pgbadger report fix or a new feature to update all HTML reports. To rebuild all reports where a binary file is still present proceed as follow: rm /path/to/reports/*.js rm /path/to/reports/*.css pgbadger -X -I -O /path/to/reports/ --rebuild it will also update all resource files (JS and CSS). Use -E or --explode if the reports were built using this option. =head2 Monthly reports By default pgBadger in incremental mode only computes daily and weekly reports. If you want monthly cumulative reports you will have to use a separate command to specify the report to build. For example to build a report for August 2019: pgbadger -X --month-report 2919-08 /var/www/pg_reports/ this will add a link to the month name into the calendar view of incremental reports to look at monthly report. The report for a current month can be run every day it is entirely rebuilt each time. The monthly report is not built by default because it could take lot of time following the amount of data. If reports were built with the per database option ( -E | --explode ) it must be used too when calling pgbadger to build monthly report: pgbadger -E -X --month-report 2919-08 /var/www/pg_reports/ This is the same when using the rebuild option ( -R | --rebuild ). =head1 BINARY FORMAT Using the binary format it is possible to create custom incremental and cumulative reports. For example, if you want to refresh a pgBadger report each hour from a daily PostgreSQL log file, you can proceed by running each hour the following commands: pgbadger --last-parsed .pgbadger_last_state_file -o sunday/hourX.bin /var/log/pgsql/postgresql-Sun.log to generate the incremental data files in binary format. And to generate the fresh HTML report from that binary file: pgbadger sunday/*.bin Or as another example, if you generate one log file per hour and you want reports to be rebuilt each time the log file is rotated, proceed as follows: pgbadger -o day1/hour01.bin /var/log/pgsql/pglog/postgresql-2012-03-23_10.log pgbadger -o day1/hour02.bin /var/log/pgsql/pglog/postgresql-2012-03-23_11.log pgbadger -o day1/hour03.bin /var/log/pgsql/pglog/postgresql-2012-03-23_12.log ... When you want to refresh the HTML report, for example each time after a new binary file is generated, just do the following: pgbadger -o day1_report.html day1/*.bin Adjust the commands to suit your particular needs. =head1 JSON FORMAT JSON format is good for sharing data with other languages, which makes it easy to integrate pgBadger result into other monitoring tools like Cacti or Graphite. =head1 AUTHORS pgBadger is an original work from Gilles Darold. The pgBadger logo is an original creation of Damien Cazeils. The pgBadger v4.x design comes from the "Art is code" company. This web site is a work of Gilles Darold. pgBadger is maintained by Gilles Darold and every one who wants to contribute. Many people have contributed to pgBadger, they are all quoted in the Changelog file. =head1 LICENSE pgBadger is free software distributed under the PostgreSQL Licence. Copyright (c) 2012-2022, Gilles Darold A modified version of the SQL::Beautify Perl Module is embedded in pgBadger with copyright (C) 2009 by Jonas Kramer and is published under the terms of the Artistic License 2.0. pgbadger-11.7/pgbadger000077500000000000000000057775061417325540000147520ustar00rootroot00000000000000#!/usr/bin/env perl #------------------------------------------------------------------------------ # # pgBadger - Advanced PostgreSQL log analyzer # # This program is open source, licensed under the PostgreSQL Licence. # For license terms, see the LICENSE file. #------------------------------------------------------------------------------ # # Settings in postgresql.conf # # You should enable SQL query logging with log_min_duration_statement >= 0 # With stderr output # Log line prefix should be: log_line_prefix = '%t [%p]: ' # Log line prefix should be: log_line_prefix = '%t [%p]: user=%u,db=%d ' # Log line prefix should be: log_line_prefix = '%t [%p]: db=%d,user=%u ' # If you need report per client Ip adresses you can add client=%h or remote=%h # pgbadger will also recognized the following form: # log_line_prefix = '%t [%p]: db=%d,user=%u,client=%h ' # or # log_line_prefix = '%t [%p]: user=%u,db=%d,remote=%h ' # With syslog output # Log line prefix should be: log_line_prefix = 'db=%d,user=%u ' # # Additional information that could be collected and reported # log_checkpoints = on # log_connections = on # log_disconnections = on # log_lock_waits = on # log_temp_files = 0 # log_autovacuum_min_duration = 0 #------------------------------------------------------------------------------ use vars qw($VERSION); use strict qw(vars subs); use Getopt::Long qw(:config no_ignore_case bundling); use IO::File; use Benchmark; use File::Basename; use Storable qw(store_fd fd_retrieve); use Time::Local qw(timegm_nocheck timelocal_nocheck timegm timelocal); use POSIX qw(locale_h sys_wait_h _exit strftime); setlocale(LC_NUMERIC, ''); setlocale(LC_ALL, 'C'); use File::Spec qw/ tmpdir /; use File::Temp qw/ tempfile /; use IO::Handle; use IO::Pipe; use FileHandle; use Socket; use constant EBCDIC => "\t" ne "\011"; use Encode qw(encode decode); $VERSION = '11.7'; $SIG{'CHLD'} = 'DEFAULT'; my $TMP_DIR = File::Spec->tmpdir() || '/tmp'; my %RUNNING_PIDS = (); my @tempfiles = (); my $parent_pid = $$; my $interrupt = 0; my $tmp_last_parsed = ''; my $tmp_dblist = ''; my @SQL_ACTION = ('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'COPY FROM', 'COPY TO', 'CTE', 'DDL', 'TCL', 'CURSOR'); my @LATENCY_PERCENTILE = sort {$a <=> $b} (99,95,90); my $graphid = 1; my $NODATA = '
NO DATASET
'; my $MAX_QUERY_LENGTH = 25000; my $terminate = 0; my %CACHE_DNS = (); my $DNSLookupTimeout = 1; # (in seconds) my $EXPLAIN_URL = 'http://explain.depesz.com/?is_public=0&is_anon=0&plan='; my $PID_DIR = $TMP_DIR; my $PID_FILE = undef; my %DBLIST = (); my $DBALL = 'postgres'; my $LOG_EOL_TYPE = 'LF'; # Factor used to estimate the total size of compressed file # when real size can not be obtained (bz2 or remote files) my $BZ_FACTOR = 30; my $GZ_FACTOR = 15; my $XZ_FACTOR = 18; my @E2A = ( 0, 1, 2, 3,156, 9,134,127,151,141,142, 11, 12, 13, 14, 15, 16, 17, 18, 19,157, 10, 8,135, 24, 25,146,143, 28, 29, 30, 31, 128,129,130,131,132,133, 23, 27,136,137,138,139,140, 5, 6, 7, 144,145, 22,147,148,149,150, 4,152,153,154,155, 20, 21,158, 26, 32,160,226,228,224,225,227,229,231,241,162, 46, 60, 40, 43,124, 38,233,234,235,232,237,238,239,236,223, 33, 36, 42, 41, 59, 94, 45, 47,194,196,192,193,195,197,199,209,166, 44, 37, 95, 62, 63, 248,201,202,203,200,205,206,207,204, 96, 58, 35, 64, 39, 61, 34, 216, 97, 98, 99,100,101,102,103,104,105,171,187,240,253,254,177, 176,106,107,108,109,110,111,112,113,114,170,186,230,184,198,164, 181,126,115,116,117,118,119,120,121,122,161,191,208, 91,222,174, 172,163,165,183,169,167,182,188,189,190,221,168,175, 93,180,215, 123, 65, 66, 67, 68, 69, 70, 71, 72, 73,173,244,246,242,243,245, 125, 74, 75, 76, 77, 78, 79, 80, 81, 82,185,251,252,249,250,255, 92,247, 83, 84, 85, 86, 87, 88, 89, 90,178,212,214,210,211,213, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,179,219,220,217,218,159 ); if (EBCDIC && ord('^') == 106) { # as in the BS2000 posix-bc coded character set $E2A[74] = 96; $E2A[95] = 159; $E2A[106] = 94; $E2A[121] = 168; $E2A[161] = 175; $E2A[173] = 221; $E2A[176] = 162; $E2A[186] = 172; $E2A[187] = 91; $E2A[188] = 92; $E2A[192] = 249; $E2A[208] = 166; $E2A[221] = 219; $E2A[224] = 217; $E2A[251] = 123; $E2A[253] = 125; $E2A[255] = 126; } elsif (EBCDIC && ord('^') == 176) { # as in codepage 037 on os400 $E2A[21] = 133; $E2A[37] = 10; $E2A[95] = 172; $E2A[173] = 221; $E2A[176] = 94; $E2A[186] = 91; $E2A[187] = 93; $E2A[189] = 168; } my $pgbadger_logo = ''; my $pgbadger_ico = 'data:image/x-icon;base64, AAABAAEAIyMQAAEABAA8BAAAFgAAACgAAAAjAAAARgAAAAEABAAAAAAAAAAAAAAAAAAAAAAAAAAA AAAAAAAAAgAAGRsZACgqKQA2OTcASEpJAFpdWwBoa2kAeHt5AImMigCeoZ8AsLOxAMTHxQDR1NIA 5enmAPv+/AAAAAAA///////////////////////wAAD///////////H///////////AAAP////// //9Fq7Yv////////8AAA////////8V7u7qD////////wAAD///////8B7qWN5AL///////AAAP// ///y8Avrc3rtMCH/////8AAA/////xABvbAAAJ6kAA/////wAAD////wAG5tQAAADp6RAP////AA AP//MQBd7C2lRESOWe5xAD//8AAA//8APO7iC+7e7u4A3uxwBf/wAAD/9Aju7iAAvu7u0QAN7ukA 7/AAAP/wCe7kAAAF7ugAAAHO6xD/8AAA//AK7CAAAAHO1AAAABnrEP/wAAD/8ArAAAAAAc7kAAAA AIwQ//AAAP/wCjAAAAAC3uQAAAAAHBCf8AAA//AIEBVnIATu5gAXZhAFEP/wAAD/8AIAqxdwBu7p AFoX0QIQ//AAAP/wAAPsBCAL7u4QBwfmAAD/8AAA//AAA8owAC7u7lAAKbYAAJ/wAAD/8AAAAAAA fu7uwAAAAAAA//AAAP/wAAAAAADu7u7jAAAAAAD/8AAA//AAAAAABe7u7uoAAAAAAP/wAAD/8AAA AAAL7u7u7QAAAAAAn/AAAP/wAAAAAB3u7u7uYAAAAAD/8AAA//MAAAAATu7u7u6QAAAAAP/wAAD/ /wAAAAAM7u7u7TAAAAAD//AAAP//IQAAAAKu7u7UAAAAAB//8AAA////IAAAAAju7BAAAAAP///w AAD////2AAA1je7ulUAAA/////AAAP/////xEAnO7u7pIAH/////8AAA//////9CABju6iACP/// ///wAAD////////wAAggAP////////AAAP////////8wAAA/////////8AAA///////////w//// ///////wAAD///////////////////////AAAP/////gAAAA//+//+AAAAD//Af/4AAAAP/4A//g AAAA//AA/+AAAAD/oAA/4AAAAP8AAB/gAAAA/gAAD+AAAADwAAAB4AAAAPAAAADgAAAA4AAAAGAA AADgAAAA4AAAAOAAAADgAAAA4AAAAOAAAADgAAAAYAAAAOAAAADgAAAA4AAAAOAAAADgAAAA4AAA AOAAAABgAAAA4AAAAOAAAADgAAAA4AAAAOAAAADgAAAA4AAAAGAAAADgAAAA4AAAAOAAAADgAAAA 8AAAAOAAAADwAAAB4AAAAPwAAAfgAAAA/gAAD+AAAAD/gAA/4AAAAP/AAH/gAAAA//gD/+AAAAD/ /Af/4AAAAP//v//gAAAA/////+AAAAA '; my %CLASS_ERROR_CODE = ( '00' => 'Successful Completion', '01' => 'Warning', '02' => 'No Data (this is also a warning class per the SQL standard)', '03' => 'SQL Statement Not Yet Complete', '08' => 'Connection Exception', '09' => 'Triggered Action Exception', '0A' => 'Feature Not Supported', '0B' => 'Invalid Transaction Initiation', '0F' => 'Locator Exception', '0L' => 'Invalid Grantor', '0P' => 'Invalid Role Specification', '0Z' => 'Diagnostics Exception', '20' => 'Case Not Found', '21' => 'Cardinality Violation', '22' => 'Data Exception', '23' => 'Integrity Constraint Violation', '24' => 'Invalid Cursor State', '25' => 'Invalid Transaction State', '26' => 'Invalid SQL Statement Name', '27' => 'Triggered Data Change Violation', '28' => 'Invalid Authorization Specification', '2B' => 'Dependent Privilege Descriptors Still Exist', '2D' => 'Invalid Transaction Termination', '2F' => 'SQL Routine Exception', '34' => 'Invalid Cursor Name', '38' => 'External Routine Exception', '39' => 'External Routine Invocation Exception', '3B' => 'Savepoint Exception', '3D' => 'Invalid Catalog Name', '3F' => 'Invalid Schema Name', '40' => 'Transaction Rollback', '42' => 'Syntax Error or Access Rule Violation', '44' => 'WITH CHECK OPTION Violation', '53' => 'Insufficient Resources', '54' => 'Program Limit Exceeded', '55' => 'Object Not In Prerequisite State', '57' => 'Operator Intervention', '58' => 'System Error (errors external to PostgreSQL itself)', '72' => 'Snapshot Failure', 'F0' => 'Configuration File Error', 'HV' => 'Foreign Data Wrapper Error (SQL/MED)', 'P0' => 'PL/pgSQL Error', 'XX' => 'Internal Error', ); #### # method used to fork as many child as wanted ## sub spawn { my $coderef = shift; unless (@_ == 0 && $coderef && ref($coderef) eq 'CODE') { print "usage: spawn CODEREF"; exit 0; } my $pid; if (!defined($pid = fork)) { print STDERR "ERROR: cannot fork: $!\n"; return; } elsif ($pid) { $RUNNING_PIDS{$pid} = $pid; return; # the parent } # the child -- go spawn $< = $>; $( = $); # suid progs only exit &$coderef(); } # Command line options my $journalctl_cmd = ''; my $zcat_cmd = 'gunzip -c'; my $zcat = $zcat_cmd; my $bzcat = 'bunzip2 -c'; my $lz4cat = 'lz4cat'; my $ucat = 'unzip -p'; my $xzcat = 'xzcat'; my $zstdcat = 'zstdcat'; my $gzip_uncompress_size = "gunzip -l \"%f\" | grep -E '^\\s*[0-9]+' | awk '{print \$2}'"; # lz4 archive can only contain one file. # Original size can be retrieved only if --content-size has been used for compression # it seems lz4 send output to stderr so redirect to stdout my $lz4_uncompress_size = " lz4 -v -c --list %f 2>&1 |tail -n 2|head -n1 | awk '{print \$6}'"; my $zip_uncompress_size = "unzip -l %f | awk '{if (NR==4) print \$1}'"; my $xz_uncompress_size = "xz --robot -l %f | grep totals | awk '{print \$5}'"; my $zstd_uncompress_size = "zstd -v -l %f |grep Decompressed | awk -F\"[ (]*\" '{print \$5}'"; my $format = ''; my @outfiles = (); my $outdir = ''; my $incremental = ''; my $extra_files = 0; my $help = ''; my $ver = ''; my @dbname = (); my @dbuser = (); my @dbclient = (); my @dbappname = (); my @exclude_user = (); my @exclude_appname = (); my @exclude_db = (); my @exclude_client = (); my @exclude_line = (); my $ident = ''; my $top = 0; my $sample = 3; my $extension = ''; my $maxlength = 100000; my $graph = 1; my $nograph = 0; my $debug = 0; my $nohighlight = 0; my $noprettify = 0; my $from = ''; my $to = ''; my $from_hour = ''; my $to_hour = ''; my $quiet = 0; my $progress = 1; my $error_only = 0; my @exclude_query = (); my @exclude_time = (); my @include_time = (); my $exclude_file = ''; my @include_query = (); my $include_file = ''; my $disable_error = 0; my $disable_hourly = 0; my $disable_type = 0; my $disable_query = 0; my $disable_session = 0; my $disable_connection = 0; my $disable_lock = 0; my $disable_temporary = 0; my $disable_checkpoint = 0; my $disable_autovacuum = 0; my $avg_minutes = 5; my $histo_avg_minutes = 60; my $last_parsed = ''; my $report_title = ''; my $log_line_prefix = ''; my $compiled_prefix = ''; my $project_url = 'http://pgbadger.darold.net/'; my $t_min = 0; my $t_max = 0; my $remove_comment = 0; my $select_only = 0; my $tsung_queries = 0; my $queue_size = 0; my $job_per_file = 0; my $charset = 'utf-8'; my $csv_sep_char = ','; my %current_sessions = (); my %pgb_current_sessions = (); my $incr_date = ''; my $last_incr_date = ''; my $anonymize = 0; my $noclean = 0; my $retention = 0; my $dns_resolv = 0; my $nomultiline = 0; my $noreport = 0; my $log_duration = 0; my $logfile_list = ''; my $enable_checksum = 0; my $timezone = 0; my $opt_timezone = 0; my $pgbouncer_only = 0; my $rebuild = 0; my $week_start_monday = 0; my $iso_week_number = 0; my $use_sessionid_as_pid = 0; my $dump_normalized_only = 0; my $log_timezone = 0; my $opt_log_timezone = 0; my $json_prettify = 0; my $is_tsung_output = 0; my $report_per_database = 0; my $html_outdir = ''; my $param_size_limit = 24; my $month_report = 0; my $day_report = 0; my $noexplain = 0; my $log_command = ''; my $wide_char = 0; my $noweekreport = 0; my $query_numbering = 0; my $keep_comments = 0; my $no_progessbar = 0; my $NUMPROGRESS = 10; my @DIMENSIONS = (800, 300); my $RESRC_URL = ''; my $img_format = 'png'; my @log_files = (); my %prefix_vars = (); my $q_prefix = ''; my @prefix_q_params = (); my %last_execute_stmt = (); my $disable_process_title = 0; my $dump_all_queries = 0; my $compress_extensions = qr/\.(zip|gz|xz|bz2|lz4|zst)$/i; my $remote_host = ''; my $ssh_command = ''; my $ssh_bin = 'ssh'; my $ssh_port = 22; my $ssh_identity = ''; my $ssh_user = ''; my $ssh_timeout = 10; my $ssh_options = "-o ConnectTimeout=$ssh_timeout -o PreferredAuthentications=hostbased,publickey"; my $force_sample = 0; my $curl_command = 'curl -k -s '; my $sql_prettified = pgFormatter::Beautify->new('colorize' => 1, 'format' => 'html', 'uc_keywords' => 0); # Flag for logs using UTC, in this case we don't autodetect the timezone my $isUTC = 0; # Do not display data in pie where percentage is lower than this value # to avoid label overlapping. my $pie_percentage_limit = 2; # Get the decimal separator my $n = 5 / 2; my $num_sep = ','; $num_sep = ' ' if ($n =~ /,/); # Set iso datetime pattern my $time_pattern = qr/(\d{4})-(\d{2})-(\d{2})[\sT](\d{2}):(\d{2}):(\d{2})/; # Inform the parent that it should stop iterate on parsing other files sub stop_parsing { &logmsg('DEBUG', "Received interrupt signal"); $interrupt = 1; } # With multiprocess we need to wait for all children sub wait_child { my $sig = shift; $interrupt = 2; print STDERR "Received terminating signal ($sig).\n"; 1 while wait != -1; $SIG{INT} = \&wait_child; $SIG{TERM} = \&wait_child; foreach my $f (@tempfiles) { unlink("$f->[1]") if (-e "$f->[1]"); } if ($report_per_database) { unlink("$tmp_dblist"); } if ($last_parsed && -e "$tmp_last_parsed") { unlink("$tmp_last_parsed"); } if ($last_parsed && -e "$last_parsed.tmp") { unlink("$last_parsed.tmp"); } if (-e "$PID_FILE") { unlink("$PID_FILE"); } _exit(0); } $SIG{INT} = \&wait_child; $SIG{TERM} = \&wait_child; if ($^O !~ /MSWin32|dos/i) { $SIG{USR2} = \&stop_parsing; } $| = 1; # get the command line parameters my $result = GetOptions( "a|average=i" => \$avg_minutes, "A|histo-average=i" => \$histo_avg_minutes, "b|begin=s" => \$from, "c|dbclient=s" => \@dbclient, "C|nocomment!" => \$remove_comment, "d|dbname=s" => \@dbname, "D|dns-resolv!" => \$dns_resolv, "e|end=s" => \$to, "E|explode!" => \$report_per_database, "f|format=s" => \$format, "G|nograph!" => \$nograph, "h|help!" => \$help, "H|html-outdir=s" => \$html_outdir, "i|ident=s" => \$ident, "I|incremental!" => \$incremental, "j|jobs=i" => \$queue_size, "J|Jobs=i" => \$job_per_file, "l|last-parsed=s" => \$last_parsed, "L|logfile-list=s" => \$logfile_list, "m|maxlength=i" => \$maxlength, "M|no-multiline!" => \$nomultiline, "N|appname=s" => \@dbappname, "n|nohighlight!" => \$nohighlight, "o|outfile=s" => \@outfiles, "O|outdir=s" => \$outdir, "p|prefix=s" => \$log_line_prefix, "P|no-prettify!" => \$noprettify, "q|quiet!" => \$quiet, "Q|query-numbering!" => \$query_numbering, "r|remote-host=s" => \$remote_host, 'R|retention=i' => \$retention, "s|sample=i" => \$sample, "S|select-only!" => \$select_only, "t|top=i" => \$top, "T|title=s" => \$report_title, "u|dbuser=s" => \@dbuser, "U|exclude-user=s" => \@exclude_user, "v|verbose!" => \$debug, "V|version!" => \$ver, "w|watch-mode!" => \$error_only, "W|wide-char!" => \$wide_char, "x|extension=s" => \$extension, "X|extra-files!" => \$extra_files, "z|zcat=s" => \$zcat, "Z|timezone=s" => \$opt_timezone, "pie-limit=i" => \$pie_percentage_limit, "image-format=s" => \$img_format, "exclude-query=s" => \@exclude_query, "exclude-file=s" => \$exclude_file, "exclude-db=s" => \@exclude_db, "exclude-client=s" => \@exclude_client, "exclude-appname=s" => \@exclude_appname, "include-query=s" => \@include_query, "exclude-line=s" => \@exclude_line, "include-file=s" => \$include_file, "disable-error!" => \$disable_error, "disable-hourly!" => \$disable_hourly, "disable-type!" => \$disable_type, "disable-query!" => \$disable_query, "disable-session!" => \$disable_session, "disable-connection!" => \$disable_connection, "disable-lock!" => \$disable_lock, "disable-temporary!" => \$disable_temporary, "disable-checkpoint!" => \$disable_checkpoint, "disable-autovacuum!" => \$disable_autovacuum, "charset=s" => \$charset, "csv-separator=s" => \$csv_sep_char, "include-time=s" => \@include_time, "exclude-time=s" => \@exclude_time, 'ssh-command=s' => \$ssh_command, 'ssh-program=s' => \$ssh_bin, 'ssh-port=i' => \$ssh_port, 'ssh-identity=s' => \$ssh_identity, 'ssh-option=s' => \$ssh_options, 'ssh-user=s' => \$ssh_user, 'ssh-timeout=i' => \$ssh_timeout, 'anonymize!' => \$anonymize, 'noclean!' => \$noclean, 'noreport!' => \$noreport, 'log-duration!' => \$log_duration, 'enable-checksum!' => \$enable_checksum, 'journalctl=s' => \$journalctl_cmd, 'pid-dir=s' => \$PID_DIR, 'pid-file=s' => \$PID_FILE, 'rebuild!' => \$rebuild, 'pgbouncer-only!' => \$pgbouncer_only, 'start-monday!' => \$week_start_monday, 'iso-week-number!' => \$iso_week_number, 'normalized-only!' => \$dump_normalized_only, 'log-timezone=i' => \$opt_log_timezone, 'prettify-json!' => \$json_prettify, 'month-report=s' => \$month_report, 'day-report=s' => \$day_report, 'noexplain!' => \$noexplain, 'command=s' => \$log_command, 'no-week!' => \$noweekreport, 'explain-url=s' => \$EXPLAIN_URL, 'tempdir=s' => \$TMP_DIR, 'no-process-info!' => \$disable_process_title, 'dump-all-queries!' => \$dump_all_queries, 'keep-comments!' => \$keep_comments, 'no-progressbar!' => \$no_progessbar ); die "FATAL: use pgbadger --help\n" if (not $result); # Force rebuild mode when a month report is asked $rebuild = 1 if ($month_report); $rebuild = 2 if ($day_report); # Set report title $report_title = &escape_html($report_title) if $report_title; # Show version and exit if asked if ($ver) { print "pgBadger version $VERSION\n"; exit 0; } &usage() if ($help); # Create temporary file directory if not exists mkdir("$TMP_DIR") if (!-d "$TMP_DIR"); if (!-d "$TMP_DIR") { die("Can not use temporary directory $TMP_DIR.\n"); } # Try to load Digest::MD5 when asked if ($enable_checksum) { if (eval {require Digest::MD5;1} ne 1) { die("Can not load Perl module Digest::MD5.\n"); } else { Digest::MD5->import('md5_hex'); } } # Check if an other process is already running unless ($PID_FILE) { $PID_FILE = $PID_DIR . '/pgbadger.pid'; } if (-e "$PID_FILE") { my $is_running = 2; if ($^O !~ /MSWin32|dos/i) { eval { $is_running = `ps auwx | grep pgbadger | grep -v grep | wc -l`; chomp($is_running); }; } if (!$@ && ($is_running <= 1)) { unlink("$PID_FILE"); } else { print "FATAL: an other process is already started or remove the file, see $PID_FILE\n"; exit 3; } } # Create pid file if (open(my $out, '>', $PID_FILE)) { print $out $$; close($out); } else { print "FATAL: can't create pid file $PID_FILE, $!\n"; exit 3; } # Rewrite some command line arguments as lists &compute_arg_list(); # If pgBadger must parse remote files set the ssh command # If no user defined ssh command have been set my $remote_command = ''; if ($remote_host && !$ssh_command) { $remote_command = &set_ssh_command($ssh_command, $remote_host); } # Add journalctl command to the file list if not already found if ($journalctl_cmd) { if (!grep(/^\Q$journalctl_cmd\E$/, @ARGV)) { $journalctl_cmd .= " --output='short-iso'"; push(@ARGV, $journalctl_cmd); } } # Add custom command to file list if ($log_command) { if (!grep(/^\Q$log_command\E$/, @ARGV)) { push(@ARGV, $log_command); } } # Log files to be parsed are passed as command line arguments my $empty_files = 1; if ($#ARGV >= 0) { foreach my $file (@ARGV) { push(@log_files, &set_file_list($file)); } } if (!$incremental && $html_outdir) { localdie("FATAL: parameter -H, --html-outdir can only be used with incremental mode.\n"); } # Read list of log file to parse from a file if ($logfile_list) { if (!-e $logfile_list) { localdie("FATAL: logfile list $logfile_list must exist!\n"); } my $in = undef; if (not open($in, "<", $logfile_list)) { localdie("FATAL: can not read logfile list $logfile_list, $!.\n"); } my @files = <$in>; close($in); foreach my $file (@files) { chomp($file); $file =~ s/\r//; if ($file eq '-') { localdie("FATAL: stdin input - can not be used with logfile list.\n"); } push(@log_files, &set_file_list($file)); } } # Do not warn if all log files are empty if (!$rebuild && $empty_files) { &logmsg('DEBUG', "All log files are empty, exiting..."); unlink("$PID_FILE"); exit 0; } # Logfile is a mandatory parameter when journalctl command is not set. if ( !$rebuild && ($#log_files < 0) && !$journalctl_cmd && !$log_command) { if (!$quiet) { localdie("FATAL: you must give a log file at command line parameter.\n\n"); } else { unlink("$PID_FILE"); exit 4; } } if ($#outfiles >= 1 && ($dump_normalized_only || $dump_all_queries)) { localdie("FATAL: dump of normalized queries can not ne used with multiple output.\n\n"); } # Remove follow option from journalctl command to prevent infinit loop if ($journalctl_cmd) { $journalctl_cmd =~ s/(-f|--follow)\b//; } # Quiet mode is forced with progress bar $progress = 0 if ($quiet || $no_progessbar); # Set the default number minutes for queries and connections average $avg_minutes ||= 5; $avg_minutes = 60 if ($avg_minutes > 60); $avg_minutes = 1 if ($avg_minutes < 1); $histo_avg_minutes ||= 60; $histo_avg_minutes = 60 if ($histo_avg_minutes > 60); $histo_avg_minutes = 1 if ($histo_avg_minutes < 1); my @avgs = (); for (my $i = 0 ; $i < 60 ; $i += $avg_minutes) { push(@avgs, sprintf("%02d", $i)); } my @histo_avgs = (); for (my $i = 0 ; $i < 60 ; $i += $histo_avg_minutes) { push(@histo_avgs, sprintf("%02d", $i)); } # Set error like log level regex my $parse_regex = qr/^(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|HINT|STATEMENT|CONTEXT|LOCATION)/; my $full_error_regex = qr/^(WARNING|ERROR|FATAL|PANIC|DETAIL|HINT|STATEMENT|CONTEXT)/; my $main_error_regex = qr/^(WARNING|ERROR|FATAL|PANIC)/; my $main_log_regex = qr/^(LOG|WARNING|ERROR|FATAL|PANIC)/; # Set syslog prefix regex my $other_syslog_line = ''; my $pgbouncer_log_format = ''; my $pgbouncer_log_parse1 = ''; my $pgbouncer_log_parse2 = ''; my $pgbouncer_log_parse3 = ''; # Variable to store parsed data following the line prefix my @prefix_params = (); my @pgb_prefix_params = (); my @pgb_prefix_parse1 = (); my @pgb_prefix_parse2 = (); my @pgb_prefix_parse3 = (); # Force incremental mode when rebuild mode is used if ($rebuild && !$incremental) { print STDERR "WARNING: --rebuild require incremental mode, activating it.\n" if (!$month_report || !$day_report); $incremental = 1; } &logmsg('DEBUG', "pgBadger version $VERSION." ); # set timezone to use &set_timezone(1); # Set default top query $top ||= 20; # Set output file my $outfile = ''; $outfile = $outfiles[0] if ($#outfiles >= 0); if (($dump_normalized_only || $dump_all_queries) && $outfile && $outfile !~ /\.txt$/){ localdie("FATAL: dump of normalized queries can be done in text output format, please use .txt extension.\n\n"); } # With multiple output format we must use a temporary binary file my $dft_extens = ''; if ($#outfiles >= 1) { # We can not have multiple output in incremental mode if ($incremental) { localdie("FATAL: you can not use multiple output format with incremental mode.\n\n"); } # Set temporary binary file. $outfile = $TMP_DIR . "/pgbadger_tmp_$$.bin"; # Remove the default output format for the moment # otherwise all dump will have the same output $dft_extens = $extension; $extension = ''; } elsif ($#outfiles == -1) { $extension = 'txt' if ($dump_normalized_only || $dump_all_queries); ($extension) ? push(@outfiles, 'out.' . $extension) : push(@outfiles, 'out.html'); map { s/\.text/.txt/; } @outfiles; } # Set the default extension and output format, load JSON Perl module if required # Force text output with normalized query list only and disable incremental report # Set default filename of the output file my ($current_out_file, $extens) = &set_output_extension($outfile, $extension); # Set default syslog ident name $ident ||= 'postgres'; # Set default pie percentage limit or fix value $pie_percentage_limit = 0 if ($pie_percentage_limit < 0); $pie_percentage_limit = 2 if ($pie_percentage_limit eq ''); $pie_percentage_limit = 100 if ($pie_percentage_limit > 100); # Set default download image format $img_format = lc($img_format); $img_format = 'jpeg' if ($img_format eq 'jpg'); $img_format = 'png' if ($img_format ne 'jpeg'); # Extract the output directory from outfile so that graphs will # be created in the same directory if ($current_out_file ne '-') { if (!$html_outdir && !$outdir) { my @infs = fileparse($current_out_file); if ($infs[0] ne '') { $outdir = $infs[1]; } else { # maybe a confusion between -O and -o localdie("FATAL: output file $current_out_file is a directory, should be a file\nor maybe you want to use -O | --outdir option instead.\n"); } } elsif ($outdir && !-d "$outdir") { # An output directory has been passed as command line parameter localdie("FATAL: $outdir is not a directory or doesn't exist.\n"); } elsif ($html_outdir && !-d "$html_outdir") { # An HTML output directory has been passed as command line parameter localdie("FATAL: $html_outdir is not a directory or doesn't exist.\n"); } $current_out_file = basename($current_out_file); $current_out_file = ($html_outdir || $outdir) . '/' . $current_out_file; } # Remove graph support if output is not html $graph = 0 unless ($extens eq 'html' or $extens eq 'binary' or $extens eq 'json'); $graph = 0 if ($nograph); # Set some default values my $end_top = $top - 1; $queue_size ||= 1; $job_per_file ||= 1; if ($^O =~ /MSWin32|dos/i) { if ( ($queue_size > 1) || ($job_per_file > 1) ) { print STDERR "WARNING: parallel processing is not supported on this platform.\n"; $queue_size = 1; $job_per_file = 1; } } if ($extens eq 'tsung') { # Open filehandle my $fh = new IO::File ">$current_out_file"; if (not defined $fh) { localdie("FATAL: can't write to $current_out_file, $!\n"); } print $fh qq{ }; $fh->close(); } else { # Test file creation before going to parse log my $tmpfh = new IO::File ">$current_out_file"; if (not defined $tmpfh) { localdie("FATAL: can't write to $current_out_file, $!\n"); } $tmpfh->close(); unlink($current_out_file) if (-e $current_out_file); } # -w and --disable-error can't go together if ($error_only && $disable_error) { localdie("FATAL: please choose between no event report and reporting events only.\n"); } # Set default search pattern for database, user name, application name and host in log_line_prefix my $regex_prefix_dbname = qr/(?:db|database)=([^,]*)/; my $regex_prefix_dbuser = qr/(?:user|usr)=([^,]*)/; my $regex_prefix_dbclient = qr/(?:client|remote|ip|host|connection_source)=([^,\(]*)/; my $regex_prefix_dbappname = qr/(?:app|application)=([^,]*)/; my $regex_prefix_sqlstate = qr/(?:error_code|state|state_code)=([^,]*)/; my $regex_prefix_backendtype = qr/(?:backend_type|btype)=([^,]*)/; # Set pattern to look for query type my $action_regex = qr/^[\s\(]*(DELETE|INSERT|UPDATE|SELECT|COPY|WITH|CREATE|DROP|ALTER|TRUNCATE|BEGIN|COMMIT|ROLLBACK|START|END|SAVEPOINT|DECLARE|CLOSE|FETCH|MOVE)/is; # Loading excluded query from file if any if ($exclude_file) { open(my $in, '<', $exclude_file) or localdie("FATAL: can't read file $exclude_file: $!\n"); my @exclq = <$in>; close($in); chomp(@exclq); foreach my $r (@exclq) { $r =~ s/\r//; &check_regex($r, '--exclude-file'); } push(@exclude_query, @exclq); } # Testing regex syntax if ($#exclude_query >= 0) { foreach my $r (@exclude_query) { &check_regex($r, '--exclude-query'); } } # Testing regex syntax if ($#exclude_time >= 0) { foreach my $r (@exclude_time) { &check_regex($r, '--exclude-time'); } } # # Testing regex syntax if ($#include_time >= 0) { foreach my $r (@include_time) { &check_regex($r, '--include-time'); } } # Loading included query from file if any if ($include_file) { open(my $in, '<', $include_file) or localdie("FATAL: can't read file $include_file: $!\n"); my @exclq = <$in>; close($in); chomp(@exclq); foreach my $r (@exclq) { $r =~ s/\r//; &check_regex($r, '--include-file'); } push(@include_query, @exclq); } # Testing regex syntax if ($#include_query >= 0) { foreach my $r (@include_query) { &check_regex($r, '--include-query'); } } # Check start/end date time if ($from) { if ( $from =~ /^(\d{2}):(\d{2}):(\d{2})([.]\d+([+-]\d+)?)?$/) { # only time, trick around the date part my $fractional_seconds = $4 || "0"; $from_hour = "$1:$2:$3.$fractional_seconds"; &logmsg('DEBUG', "Setting begin time to [$from_hour]" ); } elsif( $from =~ /^(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})([.]\d+([+-]\d+)?)?$/ ) { my $fractional_seconds = $7 || "0"; $from = "$1-$2-$3 $4:$5:$6.$fractional_seconds"; &logmsg('DEBUG', "Setting begin datetime to [$from]" ); } else { localdie("FATAL: bad format for begin datetime/time, should be yyyy-mm-dd hh:mm:ss.l+tz or hh:mm:ss.l+tz\n"); } } if ($to) { if ( $to =~ /^(\d{2}):(\d{2}):(\d{2})([.]\d+([+-]\d+)?)?$/) { # only time, trick around the date part my $fractional_seconds = $4 || "0"; $to_hour = "$1:$2:$3.$fractional_seconds"; &logmsg('DEBUG', "Setting end time to [$to_hour]" ); } elsif( $to =~ /^(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})([.]\d+([+-]\d+)?)?$/) { my $fractional_seconds = $7 || "0"; $to = "$1-$2-$3 $4:$5:$6.$fractional_seconds"; &logmsg('DEBUG', "Setting end time to [$to]" ); } else { localdie("FATAL: bad format for ending datetime, should be yyyy-mm-dd hh:mm:ss.l+tz or hh:mm:ss.l+tz\n"); } } if ($from && $to && ($from gt $to)) { localdie("FATAL: begin date is after end date!\n") ; } # Stores the last parsed line from log file to allow incremental parsing my $LAST_LINE = ''; # Set the level of the data aggregator, can be minute, hour or day follow the # size of the log file. my $LEVEL = 'hour'; # Month names my %month_abbr = ( 'Jan' => '01', 'Feb' => '02', 'Mar' => '03', 'Apr' => '04', 'May' => '05', 'Jun' => '06', 'Jul' => '07', 'Aug' => '08', 'Sep' => '09', 'Oct' => '10', 'Nov' => '11', 'Dec' => '12' ); my %abbr_month = ( '01' => 'Jan', '02' => 'Feb', '03' => 'Mar', '04' => 'Apr', '05' => 'May', '06' => 'Jun', '07' => 'Jul', '08' => 'Aug', '09' => 'Sep', '10' => 'Oct', '11' => 'Nov', '12' => 'Dec' ); # Inbounds of query times histogram my @histogram_query_time = (0, 1, 5, 10, 25, 50, 100, 500, 1000, 10000); # Inbounds of session times histogram my @histogram_session_time = (0, 500, 1000, 30000, 60000, 600000, 1800000, 3600000, 28800000); # Where statistics are stored my %overall_stat = (); my %pgb_overall_stat = (); my %overall_checkpoint = (); my %top_slowest = (); my %normalyzed_info = (); my %error_info = (); my %pgb_error_info = (); my %pgb_pool_info = (); my %logs_type = (); my %errors_code = (); my %per_minute_info = (); my %pgb_per_minute_info = (); my %lock_info = (); my %tempfile_info = (); my %cancelled_info = (); my %connection_info = (); my %pgb_connection_info = (); my %database_info = (); my %application_info = (); my %user_info = (); my %host_info = (); my %session_info = (); my %pgb_session_info = (); my %conn_received = (); my %checkpoint_info = (); my %autovacuum_info = (); my %autoanalyze_info = (); my @graph_values = (); my %cur_info = (); my %cur_temp_info = (); my %cur_plan_info = (); my %cur_cancel_info = (); my %cur_lock_info = (); my $nlines = 0; my %last_line = (); my %pgb_last_line = (); our %saved_last_line = (); our %pgb_saved_last_line= (); my %tsung_session = (); my %top_locked_info = (); my %top_tempfile_info = (); my %top_cancelled_info = (); my %drawn_graphs = (); my %cur_bind_info = (); my %prepare_info = (); my %bind_info = (); # Global output filehandle my $fh = undef; my $t0 = Benchmark->new; # Write resources files from __DATA__ section if they have not been already copied # and return the HTML links to that files. If --extra-file is not used returns the # CSS and JS code to be embeded in HTML files my @jscode = &write_resources(); # Automatically set parameters with incremental mode if ($incremental) { # In incremental mode an output directory must be set if (!$html_outdir && !$outdir) { localdie("FATAL: you must specify an output directory with incremental mode, see -O or --outdir.\n") } # Ensure this is not a relative path if ($outdir && dirname($outdir) eq '.') { localdie("FATAL: output directory ($outdir) is not an absolute path.\n"); } if ($html_outdir && dirname($html_outdir) eq '.') { localdie("FATAL: output HTML directory ($html_outdir) is not an absolute path.\n"); } # Ensure that the directory already exists if ($outdir && !-d $outdir) { localdie("FATAL: output directory $outdir does not exists.\n"); } # Verify that the HTML outdir exixts when specified if ($html_outdir && !-d $html_outdir) { localdie("FATAL: output HTML directory $html_outdir does not exists.\n"); } # Set default last parsed file in incremental mode if (!$last_parsed && $incremental) { $last_parsed = $outdir . '/LAST_PARSED'; } $current_out_file = 'index.html'; # Set default output format $extens = 'binary'; if ($rebuild && !$month_report && !$day_report) { # Look for directory where report must be generated again my @build_directories = (); # Find directories that shoud be rebuilt unless(opendir(DIR, "$outdir")) { localdie("FATAL: can't opendir $outdir: $!\n"); } my @dyears = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; foreach my $y (sort { $a <=> $b } @dyears) { unless(opendir(DIR, "$outdir/$y")) { localdie("FATAL: can't opendir $outdir/$y: $!\n"); } my @dmonths = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; foreach my $m (sort { $a <=> $b } @dmonths) { unless(opendir(DIR, "$outdir/$y/$m")) { localdie("FATAL: can't opendir $outdir/$y/$m: $!\n"); } my @ddays = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; foreach my $d (sort { $a <=> $b } @ddays) { unless(opendir(DIR, "$outdir/$y/$m/$d")) { localdie("FATAL: can't opendir $outdir/$y/$m/$d: $!\n"); } my @binfiles = grep { $_ =~ /\.bin$/ } readdir(DIR); closedir DIR; push(@build_directories, "$y-$m-$d") if ($#binfiles >= 0); } } } &build_incremental_reports(@build_directories); my $t2 = Benchmark->new; my $td = timediff($t2, $t0); &logmsg('DEBUG', "rebuilding reports took: " . timestr($td)); # Remove pidfile unlink("$PID_FILE"); exit 0; } elsif ($month_report) { # Look for directory where cumulative report must be generated my @build_directories = (); # Get year+month as a path $month_report =~ s#/#-#g; my $month_path = $month_report; $month_path =~ s#-#/#g; if ($month_path !~ m#^\d{4}/\d{2}$#) { localdie("FATAL: invalid format YYYY-MM for --month-report option: $month_report"); } &logmsg('DEBUG', "building month report into $outdir/$month_path"); # Find days directories that shoud be used to build the monthly report unless(opendir(DIR, "$outdir/$month_path")) { localdie("FATAL: can't opendir $outdir/$month_path: $!\n"); } my @ddays = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; foreach my $d (sort { $a <=> $b } @ddays) { unless(opendir(DIR, "$outdir/$month_path/$d")) { localdie("FATAL: can't opendir $outdir/$month_path/$d: $!\n"); } my @binfiles = grep { $_ =~ /\.bin$/ } readdir(DIR); closedir DIR; push(@build_directories, "$month_report-$d") if ($#binfiles >= 0); } &build_month_reports($month_path, @build_directories); my $t2 = Benchmark->new; my $td = timediff($t2, $t0); &logmsg('DEBUG', "building month report took: " . timestr($td)); # Remove pidfile unlink("$PID_FILE"); exit 0; } elsif ($day_report) { # Look for directory where cumulative report must be generated my @build_directories = (); # Get year+month as a path $day_report =~ s#/#-#g; my $day_path = $day_report; $day_path =~ s#-#/#g; if ($day_path !~ m#^\d{4}/\d{2}\/\d{2}$#) { localdie("FATAL: invalid format YYYY-MM-DD for --day-report option: $day_report"); } &logmsg('DEBUG', "building day report into $outdir/$day_path"); # Find days directories that shoud be used to build the monthly report unless(opendir(DIR, "$outdir/$day_path")) { localdie("FATAL: can't opendir $outdir/$day_path: $!\n"); } my @binfiles = grep { $_ =~ /\.bin$/ } readdir(DIR); closedir DIR; push(@build_directories, "$day_report") if ($#binfiles >= 0); &build_day_reports($day_path, @build_directories); my $t2 = Benchmark->new; my $td = timediff($t2, $t0); &logmsg('DEBUG', "building day report took: " . timestr($td)); # Remove pidfile unlink("$PID_FILE"); exit 0; } } else { # Extra files for resources are not allowed without incremental mode $extra_files = 0; } # Reading last line parsed if ($last_parsed && -e $last_parsed) { if (open(my $in, '<', $last_parsed)) { my @content = <$in>; close($in); foreach my $line (@content) { chomp($line); next if (!$line); my ($datetime, $current_pos, $orig, @others) = split(/\t/, $line); # Last parsed line with pgbouncer log starts with this keyword if ($datetime eq 'pgbouncer') { $pgb_saved_last_line{datetime} = $current_pos; $pgb_saved_last_line{current_pos} = $orig; $pgb_saved_last_line{orig} = join("\t", @others); } else { $saved_last_line{datetime} = $datetime; $saved_last_line{current_pos} = $current_pos; $saved_last_line{orig} = $orig; } &logmsg('DEBUG', "Found log offset " . ($saved_last_line{current_pos} || $pgb_saved_last_line{current_pos}) . " in file $last_parsed"); } # Those two log format must be read from start of the file if ( ($format eq 'binary') || ($format eq 'csv') ) { $saved_last_line{current_pos} = 0; $pgb_saved_last_line{current_pos} = 0 if ($format eq 'binary'); } } else { localdie("FATAL: can't read last parsed line from $last_parsed, $!\n"); } } $tmp_last_parsed = 'tmp_' . basename($last_parsed) if ($last_parsed); $tmp_last_parsed = "$TMP_DIR/$tmp_last_parsed"; $tmp_dblist = "$TMP_DIR/dblist.tmp"; # Clean the incremental directory if the feature is not disabled if (!$noclean && $outdir && ($saved_last_line{datetime} || $pgb_saved_last_line{datetime})) { my $last_year = ''; my $last_month = ''; my $last_day = ''; # Search the current week following the last parse date if ( ($saved_last_line{datetime} =~ /^(\d+)\-(\d+)\-(\d+) /) || ($pgb_saved_last_line{datetime} =~ /^(\d+)\-(\d+)\-(\d+) /) ) { $last_year = $1; $last_month = $2; $last_day = $3; } # Get the week number following the date my $wn = &get_week_number($last_year, $last_month, $last_day); # Get the days of the current week where binary files must be preserved my $getwnb = $wn; $getwnb-- if (!$iso_week_number); my @wdays = &get_wdays_per_month($getwnb, "$last_year-$last_month"); # Find obsolete dir days that shoud be cleaned unless(opendir(DIR, "$outdir")) { localdie("FATAL: can't opendir $outdir: $!\n"); } my @dyears = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; my @obsolete_days = (); foreach my $y (sort { $a <=> $b } @dyears) { unless(opendir(DIR, "$outdir/$y")) { localdie("FATAL: can't opendir $outdir/$y: $!\n"); } my @dmonths = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; foreach my $m (sort { $a <=> $b } @dmonths) { unless(opendir(DIR, "$outdir/$y/$m")) { localdie("FATAL: can't opendir $outdir/$y/$m: $!\n"); } my @ddays = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; foreach my $d (sort { $a <=> $b } @ddays) { if ("$y-$m-$d" lt $wdays[0]) { push(@obsolete_days, "$outdir/$y/$m/$d"); } } } } foreach my $p (@obsolete_days) { unless(opendir(DIR, "$p")) { localdie("FATAL: can't opendir $p: $!\n"); } my @hfiles = grep { $_ =~ /\.(html|txt|tsung|json)$/i } readdir(DIR); next if ($#hfiles == -1); # do not remove files if report file has not been generated seekdir(DIR, 0); my @bfiles = grep { $_ =~ /\.bin$/i } readdir(DIR); closedir DIR; foreach my $f (@bfiles) { &logmsg('DEBUG', "Removing obsolete binary file: $p/$f"); unlink("$p/$f"); } } } # Clear storage when a retention is specified in incremental mode if ( $outdir && $retention && ($saved_last_line{datetime} || $pgb_saved_last_line{datetime}) ) { if (($saved_last_line{datetime} =~ /^(\d+)\-(\d+)\-(\d+) /) || ($pgb_saved_last_line{datetime} =~ /^(\d+)\-(\d+)\-(\d+) /)) { # Search the current week following the last parse date my $limit = $1; my $wn = &get_week_number($1, $2, $3); # Case of year overlap if (($wn - $retention) < 1) { # Rewind to previous year $limit--; # Get number of last week of previous year, can be 52 or 53 my $prevwn = &get_week_number($limit, 12, 31); # Add week number including retention to the previous year $limit .= sprintf("%02d", $prevwn - abs($wn - $retention)); } else { $limit .= sprintf("%02d", $wn - $retention); } &logmsg('DEBUG', "Retention cleanup: directories and files older than <$limit> will be removed"); # Find obsolete weeks dir that shoud be cleaned unless(opendir(DIR, "$outdir")) { localdie("FATAL: can't opendir $outdir: $!\n"); } my @dyears = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; my @obsolete_weeks = (); foreach my $y (sort { $a <=> $b } @dyears) { unless(opendir(DIR, "$outdir/$y")) { localdie("FATAL: can't opendir $outdir/$y: $!\n"); } my @weeks = grep { $_ =~ /^week-\d+$/ } readdir(DIR); closedir DIR; foreach my $w (sort { $a <=> $b } @weeks) { $w =~ /^week-(\d+)$/; if ("$y$1" lt $limit) { &logmsg('DEBUG', "Removing obsolete week directory $outdir/$y/week-$1"); &cleanup_directory("$outdir/$y/week-$1", 1); push(@obsolete_weeks, "$y$1"); } } } # Now removed the corresponding days foreach my $y (sort { $a <=> $b } @dyears) { unless(opendir(DIR, "$outdir/$y")) { localdie("FATAL: can't opendir $outdir/$y: $!\n"); } my @dmonths = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; my @rmmonths = (); foreach my $m (sort { $a <=> $b } @dmonths) { unless(opendir(DIR, "$outdir/$y/$m")) { localdie("FATAL: can't opendir $outdir/$y/$m: $!\n"); } my @rmdays = (); my @ddays = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; foreach my $d (sort { $a <=> $b } @ddays) { my $weekNumber = ''; if (!$iso_week_number) { if (!$week_start_monday) { $weekNumber = sprintf("%02d", POSIX::strftime("%U", 1, 1, 1, $d, $m - 1, $y - 1900)+1); } else { $weekNumber = sprintf("%02d", POSIX::strftime("%W", 1, 1, 1, $d, $m - 1, $y - 1900)+1); } } else { $weekNumber = sprintf("%02d", POSIX::strftime("%V", 1, 1, 1, $d, $m - 1, $y - 1900)+1); } if ($#obsolete_weeks >= 0) { if (grep(/^$y$weekNumber$/, @obsolete_weeks)) { &logmsg('DEBUG', "Removing obsolete directory $outdir/$y/$m/$d"); &cleanup_directory("$outdir/$y/$m/$d", 1); push(@rmdays, $d); } } else { # Remove obsolete days when we are in binary mode # with noreport - there's no week-N directory my $diff_day = $retention * 7 * 86400; my $oldday = POSIX::strftime("%s", 1,1,1,$d,$m-1,$y-1900); my $lastday = $oldday; if (($saved_last_line{datetime} =~ /^(\d+)\-(\d+)\-(\d+) /) || ($pgb_saved_last_line{datetime} =~ /^(\d+)\-(\d+)\-(\d+) /)) { $lastday = POSIX::strftime("%s", 1,1,1,$3,$2-1,$1-1900); } if (($lastday - $oldday) > $diff_day) { &logmsg('DEBUG', "Removing obsolete directory $outdir/$y/$m/$d"); &cleanup_directory("$outdir/$y/$m/$d", 1); push(@rmdays, $d); } } } if ($#ddays == $#rmdays) { &logmsg('DEBUG', "Removing obsolete empty directory $outdir/$y/$m"); rmdir("$outdir/$y/$m"); push(@rmmonths, $m); } } if ($#dmonths == $#rmmonths) { &logmsg('DEBUG', "Removing obsolete empty directory $outdir/$y"); rmdir("$outdir/$y"); } } } } # Main loop reading log files my $global_totalsize = 0; my @given_log_files = ( @log_files ); chomp(@given_log_files); # Store globaly total size for each log files my %file_size = (); foreach my $logfile ( @given_log_files ) { $file_size{$logfile} = &get_file_size($logfile); $global_totalsize += $file_size{$logfile} if ($file_size{$logfile} > 0); } # Verify that the file has not changed for incremental move if (($incremental || $last_parsed) && !$remote_host) { my @tmpfilelist = (); # Removed files that have already been parsed during previous runs foreach my $f (@given_log_files) { if ($f eq '-') { &logmsg('DEBUG', "waiting for log entries from stdin."); $saved_last_line{current_pos} = 0; push(@tmpfilelist, $f); } elsif ($f =~ /\.bin$/) { &logmsg('DEBUG', "binary file \"$f\" as input, there is no log to parse."); $saved_last_line{current_pos} = 0; push(@tmpfilelist, $f); } elsif ( $journalctl_cmd && ($f eq $journalctl_cmd) ) { my $since = ''; if ( ($journalctl_cmd !~ /--since|-S/) && ($saved_last_line{datetime} =~ /^(\d+)-(\d+)-(\d+).(\d+):(\d+):(\d+)/) ) { $since = " --since='$1-$2-$3 $4:$5:$6'"; } &logmsg('DEBUG', "journalctl call will start since: $saved_last_line{datetime}"); my $new_journalctl_cmd = "$f$since"; push(@tmpfilelist, $new_journalctl_cmd); $file_size{$new_journalctl_cmd} = $file_size{$f}; } elsif ( $log_command && ($f eq $log_command) ) { &logmsg('DEBUG', "custom command waiting for log entries from stdin."); $saved_last_line{current_pos} = 0; push(@tmpfilelist, $f); } else { # Auto detect log format for proper parsing my $fmt = $format || 'stderr'; $fmt = autodetect_format($f, $file_size{$f}); $fmt ||= $format; # Set regex to parse the log file $fmt = set_parser_regex($fmt); if (($fmt ne 'pgbouncer') && ($saved_last_line{current_pos} > 0)) { my ($retcode, $msg) = &check_file_changed($f, $file_size{$f}, $fmt, $saved_last_line{datetime}, $saved_last_line{current_pos}); if (!$retcode) { &logmsg('DEBUG', "this file has already been parsed: $f, $msg"); } else { push(@tmpfilelist, $f); } } elsif (($fmt eq 'pgbouncer') && ($pgb_saved_last_line{current_pos} > 0)) { my ($retcode, $msg) = &check_file_changed($f, $file_size{$f}, $fmt, $pgb_saved_last_line{datetime}, $pgb_saved_last_line{current_pos}); if (!$retcode) { &logmsg('DEBUG', "this file has already been parsed: $f, $msg"); } else { push(@tmpfilelist, $f); } } else { push(@tmpfilelist, $f); } } } @given_log_files = (); push(@given_log_files, @tmpfilelist); } # Pipe used for progress bar in multiprocess my $pipe = undef; # Seeking to an old log position is not possible outside incremental mode if (!$last_parsed || !exists $saved_last_line{current_pos}) { $saved_last_line{current_pos} = 0; $pgb_saved_last_line{current_pos} = 0; } if ($dump_all_queries) { $fh = new IO::File; $fh->open($outfiles[0], '>') or localdie("FATAL: can't open output file $outfiles[0], $!\n"); } #### # Start parsing all log files #### # Number of running process my $child_count = 0; # Set max number of parallel process my $parallel_process = 0; # Open a pipe for interprocess communication my $reader = new IO::Handle; my $writer = new IO::Handle; # Fork the logger process if ($^O !~ /MSWin32|dos/i && $progress) { $pipe = IO::Pipe->new($reader, $writer); $writer->autoflush(1); spawn sub { &multiprocess_progressbar($global_totalsize); }; } # Initialise the list of reports to produce with the default report # if $report_per_database is enabled there will be a report for each # database. Information not related to a database (checkpoint, pgbouncer # statistics, etc.) will be included in the default report which should # be the postgres database to be read by the DBA of the PostgreSQL cluster. $DBLIST{$DBALL} = 1; # Parse each log file following the multiprocess mode chosen (-j or -J) foreach my $logfile ( @given_log_files ) { # If we just want to build incremental reports from binary files # just build the list of input directories with binary files if ($incremental && $html_outdir && !$outdir) { my $incr_date = ''; my $binpath = ''; if ($logfile =~ /^(.*\/)(\d+)\/(\d+)\/(\d+)\/[^\/]+\.bin$/) { $binpath = $1; $incr_date = "$2-$3-$4"; } # Mark the directory as needing index update if (open(my $out, '>>', "$last_parsed.tmp")) { print $out "$binpath$incr_date\n"; close($out); } else { &logmsg('ERROR', "can't save last parsed line into $last_parsed.tmp, $!"); } next; } # Confirm if we can use multiprocess for this file my $pstatus = confirm_multiprocess($logfile); if ($pstatus >= 0) { if ($pstatus = 1 && $job_per_file > 1) { $parallel_process = $job_per_file; } else { $parallel_process = $queue_size; } } else { &logmsg('DEBUG', "parallel processing will not ne used."); $parallel_process = 1; } # Wait until a child dies if max parallel processes is reach while ($child_count >= $parallel_process) { my $kid = waitpid(-1, WNOHANG); if ($kid > 0) { $child_count--; delete $RUNNING_PIDS{$kid}; } sleep(1); } # Get log format of the current file my $fmt = $format || 'stderr'; my $logfile_orig = $logfile; if ($logfile ne '-' && !$journalctl_cmd && !$log_command) { $fmt = &autodetect_format($logfile, $file_size{$logfile}); $fmt ||= $format; # Remove log format from filename if any $logfile =~ s/:(stderr|csv|syslog|pgbouncer|jsonlog|logplex|rds|redshift)\d*$//i; &logmsg('DEBUG', "pgBadger will use log format $fmt to parse $logfile."); } else { &logmsg('DEBUG', "Can not autodetect log format, assuming $fmt."); } # Set the timezone to use &set_timezone(); # Set the regex to parse the log file following the format $fmt = set_parser_regex($fmt); # Do not use split method with remote and compressed files, stdin, custom or journalctl command if ( ($parallel_process > 1) && ($queue_size > 1) && ($logfile !~ $compress_extensions) && ($logfile !~ /\.bin$/i) && ($logfile ne '-') && ($logfile !~ /^(http[s]*|ftp[s]*|ssh):/i) && (!$journalctl_cmd || ($logfile !~ /\Q$journalctl_cmd\E/)) && (!$log_command || ($logfile !~ /\Q$log_command\E/)) ) { # Create multiple processes to parse one log file by chunks of data my @chunks = split_logfile($logfile, $file_size{$logfile_orig}, ($fmt eq 'pgbouncer') ? $pgb_saved_last_line{current_pos} : $saved_last_line{current_pos}); &logmsg('DEBUG', "The following boundaries will be used to parse file $logfile, " . join('|', @chunks)); for (my $i = 0; $i < $#chunks; $i++) { while ($child_count >= $parallel_process) { my $kid = waitpid(-1, WNOHANG); if ($kid > 0) { $child_count--; delete $RUNNING_PIDS{$kid}; } sleep(1); } localdie("FATAL: Abort signal received when processing to next chunk\n") if ($interrupt == 2); last if ($interrupt); push(@tempfiles, [ tempfile('tmp_pgbadgerXXXX', SUFFIX => '.bin', DIR => $TMP_DIR, O_TEMPORARY => 1, UNLINK => 1 ) ]); spawn sub { &process_file($logfile, $file_size{$logfile_orig}, $fmt, $tempfiles[-1]->[0], $chunks[$i], $chunks[$i+1], $i); }; $child_count++; } } else { # Start parsing one file per parallel process if ($^O !~ /MSWin32|dos/i) { push(@tempfiles, [ tempfile('tmp_pgbadgerXXXX', SUFFIX => '.bin', DIR => $TMP_DIR, UNLINK => 1 ) ]); spawn sub { &process_file($logfile, $file_size{$logfile_orig}, $fmt, $tempfiles[-1]->[0], ($fmt eq 'pgbouncer') ? $pgb_saved_last_line{current_pos} : $saved_last_line{current_pos}); }; $child_count++; } else { &process_file($logfile, $file_size{$logfile_orig}, $fmt, undef, ($fmt eq 'pgbouncer') ? $pgb_saved_last_line{current_pos} : $saved_last_line{current_pos}); } } localdie("FATAL: Abort signal received when processing next file\n") if ($interrupt == 2); last if ($interrupt); } # Wait for all child processes to localdie except for the logger # On Windows OS $progress is disabled so we don't go here while (scalar keys %RUNNING_PIDS > $progress) { my $kid = waitpid(-1, WNOHANG); if ($kid > 0) { delete $RUNNING_PIDS{$kid}; } sleep(1); } # Terminate the process logger if ($^O !~ /MSWin32|dos/i) { foreach my $k (keys %RUNNING_PIDS) { kill('USR1', $k); %RUNNING_PIDS = (); } # Clear previous statistics &init_stats_vars(); } # Load all data gathered by all the different processes foreach my $f (@tempfiles) { next if (!-e "$f->[1]" || -z "$f->[1]"); my $fht = new IO::File; $fht->open("< $f->[1]") or localdie("FATAL: can't open temp file $f->[1], $!\n"); load_stats($fht); $fht->close(); } # Get last line parsed from all process if ($last_parsed) { &logmsg('DEBUG', "Reading temporary last parsed line from $tmp_last_parsed"); if (open(my $in, '<', $tmp_last_parsed) ) { while (my $line = <$in>) { chomp($line); $line =~ s/\r//; my ($d, $p, $l, @o) = split(/\t/, $line); if ($d ne 'pgbouncer') { if (!$last_line{datetime} || ($d gt $last_line{datetime})) { $last_line{datetime} = $d; $last_line{orig} = $l; $last_line{current_pos} = $p; } } else { $d = $p; $p = $l; $l = join("\t", @o); if (!$pgb_last_line{datetime} || ($d gt $pgb_last_line{datetime})) { $pgb_last_line{datetime} = $d; $pgb_last_line{orig} = $l; $pgb_last_line{current_pos} = $p; } } } close($in); } unlink("$tmp_last_parsed"); } # Save last line parsed if ($last_parsed && ($last_line{datetime} || $pgb_last_line{datetime}) && ($last_line{orig} || $pgb_last_line{orig}) ) { &logmsg('DEBUG', "Saving last parsed line into $last_parsed"); if (open(my $out, '>', $last_parsed)) { if ($last_line{datetime}) { $last_line{current_pos} ||= 0; print $out "$last_line{datetime}\t$last_line{current_pos}\t$last_line{orig}\n"; } elsif ($saved_last_line{datetime}) { $saved_last_line{current_pos} ||= 0; print $out "$saved_last_line{datetime}\t$saved_last_line{current_pos}\t$saved_last_line{orig}\n"; } if ($pgb_last_line{datetime}) { $pgb_last_line{current_pos} ||= 0; print $out "pgbouncer\t$pgb_last_line{datetime}\t$pgb_last_line{current_pos}\t$pgb_last_line{orig}\n"; } elsif ($pgb_saved_last_line{datetime}) { $pgb_saved_last_line{current_pos} ||= 0; print $out "pgbouncer\t$pgb_saved_last_line{datetime}\t$pgb_saved_last_line{current_pos}\t$pgb_saved_last_line{orig}\n"; } close($out); } else { &logmsg('ERROR', "can't save last parsed line into $last_parsed, $!"); } } if ($terminate) { unlink("$PID_FILE"); exit 2; } #### # Generates statistics output #### my $t1 = Benchmark->new; my $td = timediff($t1, $t0); &logmsg('DEBUG', "the log statistics gathering took:" . timestr($td)); if ($dump_all_queries) { $fh->close(); # Remove pidfile and temporary file unlink($tmp_dblist) if ($tmp_dblist); unlink("$PID_FILE"); unlink("$last_parsed.tmp") if (-e "$last_parsed.tmp"); unlink($TMP_DIR . "/pgbadger_tmp_$$.bin") if ($#outfiles >= 1); exit 0; } # Read the list of database we have proceeded in all child process if ($report_per_database) { %DBLIST = (); if (open(my $out, '<', "$tmp_dblist")) { my @data = <$out>; foreach my $tmp (@data) { chomp($tmp); my %dblist = split(/;/, $tmp); foreach my $d (keys %dblist) { next if ($#dbname >= 0 and !grep(/^$d$/i, @dbname)); $DBLIST{$d} = 1; $overall_stat{nlines}{$d} += $dblist{$d}; } } close($out); &logmsg('DEBUG', "looking for list of database retrieved from log: " . join(',', keys %DBLIST)); } else { &logmsg('ERROR', "can't read list of database from file $tmp_dblist, $!"); } } if ( !$incremental && ($#given_log_files >= 0) ) { my $chld_running = 0; foreach my $db (sort keys %DBLIST) { next if (!$db); if ($^O =~ /MSWin32|dos/i || $parallel_process <= 1) { &gen_database_report($db); } else { while ($chld_running >= $parallel_process) { my $kid = waitpid(-1, WNOHANG); if ($kid > 0) { $chld_running--; delete $RUNNING_PIDS{$kid}; } sleep(1); } spawn sub { &gen_database_report($db); }; $chld_running++; } } if ($^O !~ /MSWin32|dos/i && $parallel_process > 1) { while (scalar keys %RUNNING_PIDS > $progress) { my $kid = waitpid(-1, WNOHANG); if ($kid > 0) { delete $RUNNING_PIDS{$kid}; } sleep(1); } } } elsif (!$incremental || !$noreport) { # Look for directory where report must be generated my @build_directories = (); if (-e "$last_parsed.tmp") { if (open(my $in, '<', "$last_parsed.tmp")) { while (my $l = <$in>) { chomp($l); $l =~ s/\r//; push(@build_directories, $l) if (!grep(/^$l$/, @build_directories)); } close($in); unlink("$last_parsed.tmp"); } else { &logmsg('ERROR', "can't read file $last_parsed.tmp, $!"); } &build_incremental_reports(@build_directories); } else { &logmsg('DEBUG', "no new entries in your log(s) since last run."); } } my $t2 = Benchmark->new; $td = timediff($t2, $t1); &logmsg('DEBUG', "building reports took: " . timestr($td)); $td = timediff($t2, $t0); &logmsg('DEBUG', "the total execution time took: " . timestr($td)); # Remove pidfile and temporary file unlink($tmp_dblist); unlink("$PID_FILE"); unlink("$last_parsed.tmp") if (-e "$last_parsed.tmp"); unlink($TMP_DIR . "/pgbadger_tmp_$$.bin") if ($#outfiles >= 1); exit 0; #------------------------------------------------------------------------------- # Show pgBadger command line usage sub usage { print qq{ Usage: pgbadger [options] logfile [...] PostgreSQL log analyzer with fully detailed reports and graphs. Arguments: logfile can be a single log file, a list of files, or a shell command returning a list of files. If you want to pass log content from stdin use - as filename. Note that input from stdin will not work with csvlog. Options: -a | --average minutes : number of minutes to build the average graphs of queries and connections. Default 5 minutes. -A | --histo-average min: number of minutes to build the histogram graphs of queries. Default 60 minutes. -b | --begin datetime : start date/time for the data to be parsed in log (either a timestamp or a time) -c | --dbclient host : only report on entries for the given client host. -C | --nocomment : remove comments like /* ... */ from queries. -d | --dbname database : only report on entries for the given database. -D | --dns-resolv : client ip addresses are replaced by their DNS name. Be warned that this can really slow down pgBadger. -e | --end datetime : end date/time for the data to be parsed in log (either a timestamp or a time) -E | --explode : explode the main report by generating one report per database. Global information not related to a database are added to the postgres database report. -f | --format logtype : possible values: syslog, syslog2, stderr, jsonlog, cvs, pgbouncer, logplex, rds and redshift. Use this option when pgBadger is not able to detect the log format. -G | --nograph : disable graphs on HTML output. Enabled by default. -h | --help : show this message and exit. -H | --html-outdir path: path to directory where HTML report must be written in incremental mode, binary files stay on directory defined with -O, --outdir option. -i | --ident name : programname used as syslog ident. Default: postgres -I | --incremental : use incremental mode, reports will be generated by days in a separate directory, --outdir must be set. -j | --jobs number : number of jobs to run at same time. Run as single by default or when working with csvlog. -J | --Jobs number : number of log file to parse in parallel. Process one file at a time by default or when csvlog is used. -l | --last-parsed file: allow incremental log parsing by registering the last datetime and line parsed. Useful if you want to watch errors since last run or if you want one report per day with a log rotated each week. -L | --logfile-list file:file containing a list of log file to parse. -m | --maxlength size : maximum length of a query, it will be restricted to the given size. Default truncate size is $maxlength. -M | --no-multiline : do not collect multiline statement to avoid garbage especially on errors that generate a huge report. -n | --nohighlight : disable SQL code highlighting. -N | --appname name : only report on entries for given application name -o | --outfile filename: define the filename for the output. Default depends on the output format: out.html, out.txt, out.bin, out.json or out.tsung. This option can be used multiple time to output several format. To use json output the Perl module JSON::XS must be installed, To dump output to stdout use - as filename. -O | --outdir path : directory where out file must be saved. -p | --prefix string : the value of your custom log_line_prefix as defined in your postgresql.conf. Only use it if you aren't using one of the standard prefixes specified in the pgBadger documentation, such as if your prefix includes additional variables like client ip or application name. See examples below. -P | --no-prettify : disable SQL queries prettify formatter. -q | --quiet : don't print anything to stdout, not even a progress bar. -Q | --query-numbering : add numbering of queries to the output when using options --dump-all-queries or --normalized-only. -r | --remote-host ip : set the host where to execute the cat command on remote logfile to parse locally the file. -R | --retention N : number of weeks to keep in incremental mode. Default to 0, disabled. Used to set the number of weeks to keep in output directory. Older weeks and days directory are automatically removed. -s | --sample number : number of query samples to store. Default: 3. -S | --select-only : only report SELECT queries. -t | --top number : number of queries to store/display. Default: 20. -T | --title string : change title of the HTML page report. -u | --dbuser username : only report on entries for the given user. -U | --exclude-user username : exclude entries for the specified user from report. Can be used multiple time. -v | --verbose : enable verbose or debug mode. Disabled by default. -V | --version : show pgBadger version and exit. -w | --watch-mode : only report errors just like logwatch could do. -W | --wide-char : encode html output of queries into UTF8 to avoid Perl message "Wide character in print". -x | --extension : output format. Values: text, html, bin, json or tsung. Default: html -X | --extra-files : in incremental mode allow pgBadger to write CSS and JS files in the output directory as separate files. -z | --zcat exec_path : set the full path to the zcat program. Use it if zcat or bzcat or unzip is not in your path. -Z | --timezone +/-XX : Set the number of hours from GMT of the timezone. Use this to adjust date/time in JavaScript graphs. --pie-limit num : pie data lower than num% will show a sum instead. --exclude-query regex : any query matching the given regex will be excluded from the report. For example: "^(VACUUM|COMMIT)" You can use this option multiple times. --exclude-file filename: path of the file which contains all the regex to use to exclude queries from the report. One regex per line. --include-query regex : any query that does not match the given regex will be excluded from the report. You can use this option multiple times. For example: "(tbl1|tbl2)". --include-file filename: path of the file which contains all the regex of the queries to include from the report. One regex per line. --disable-error : do not generate error report. --disable-hourly : do not generate hourly report. --disable-type : do not generate report of queries by type, database or user. --disable-query : do not generate query reports (slowest, most frequent, queries by users, by database, ...). --disable-session : do not generate session report. --disable-connection : do not generate connection report. --disable-lock : do not generate lock report. --disable-temporary : do not generate temporary report. --disable-checkpoint : do not generate checkpoint/restartpoint report. --disable-autovacuum : do not generate autovacuum report. --charset : used to set the HTML charset to be used. Default: utf-8. --csv-separator : used to set the CSV field separator, default: , --exclude-time regex : any timestamp matching the given regex will be excluded from the report. Example: "2013-04-12 .*" You can use this option multiple times. --include-time regex : only timestamps matching the given regex will be included in the report. Example: "2013-04-12 .*" You can use this option multiple times. --exclude-db name : exclude entries for the specified database from report. Example: "pg_dump". Can be used multiple time. --exclude-appname name : exclude entries for the specified application name from report. Example: "pg_dump". Can be used multiple time. --exclude-line regex : pgBadger will start to exclude any log entry that will match the given regex. Can be used multiple time. --exclude-client name : exclude log entries for the specified client ip. Can be used multiple time. --anonymize : obscure all literals in queries, useful to hide confidential data. --noreport : prevent pgBadger to create reports in incremental mode. --log-duration : force pgBadger to associate log entries generated by both log_duration = on and log_statement = 'all' --enable-checksum : used to add a md5 sum under each query report. --journalctl command : command to use to replace PostgreSQL logfile by a call to journalctl. Basically it might be: journalctl -u postgresql-9.5 --pid-dir path : set the path where the pid file must be stored. Default /tmp --pid-file file : set the name of the pid file to manage concurrent execution of pgBadger. Default: pgbadger.pid --rebuild : used to rebuild all html reports in incremental output directories where there's binary data files. --pgbouncer-only : only show PgBouncer related menu in the header. --start-monday : in incremental mode, calendar's weeks start on a sunday. Use this option to start on a monday. --iso-week-number : in incremental mode, calendar's weeks start on a monday and respect the ISO 8601 week number, range 01 to 53, where week 1 is the first week that has at least 4 days in the new year. --normalized-only : only dump all normalized query to out.txt --log-timezone +/-XX : Set the number of hours from GMT of the timezone that must be used to adjust date/time read from log file before beeing parsed. Using this option make more difficult log search with a date/time. --prettify-json : use it if you want json output to be prettified. --month-report YYYY-MM : create a cumulative HTML report over the specified month. Requires incremental output directories and the presence of all necessary binary data files --day-report YYYY-MM-DD: create an HTML report over the specified day. Requires incremental output directories and the presence of all necessary binary data files --noexplain : do not process lines generated by auto_explain. --command CMD : command to execute to retrieve log entries on stdin. pgBadger will open a pipe to the command and parse log entries generated by the command. --no-week : inform pgbadger to not build weekly reports in incremental mode. Useful if it takes too much time. --explain-url URL : use it to override the url of the graphical explain tool. Default: $EXPLAIN_URL --tempdir DIR : set directory where temporary files will be written Default: File::Spec->tmpdir() || '/tmp' --no-process-info : disable changing process title to help identify pgbadger process, some system do not support it. --dump-all-queries : dump all queries found in the log file replacing bind parameters are included in the queries at their respective placeholders position. --keep-comments : do not remove comments from normalized queries. It can be useful if you want to distinguish between same normalized queries. --no-progressbar : disable progressbar. pgBadger is able to parse a remote log file using a passwordless ssh connection. Use the -r or --remote-host to set the host ip address or hostname. There's also some additional options to fully control the ssh connection. --ssh-program ssh path to the ssh program to use. Default: ssh. --ssh-port port ssh port to use for the connection. Default: 22. --ssh-user username connection login name. Default to running user. --ssh-identity file path to the identity file to use. --ssh-timeout second timeout to ssh connection failure. Default 10 secs. --ssh-option options list of -o options to use for the ssh connection. Options always used: -o ConnectTimeout=\$ssh_timeout -o PreferredAuthentications=hostbased,publickey Log file to parse can also be specified using an URI, supported protocol are http[s] and [s]ftp. The curl command will be used to download the file and the file will be parsed during download. The ssh protocol is also supported and will use the ssh command like with the remote host use. See examples bellow. Examples: pgbadger /var/log/postgresql.log pgbadger /var/log/postgres.log.2.gz /var/log/postgres.log.1.gz /var/log/postgres.log pgbadger /var/log/postgresql/postgresql-2012-05-* pgbadger --exclude-query="^(COPY|COMMIT)" /var/log/postgresql.log pgbadger -b "2012-06-25 10:56:11" -e "2012-06-25 10:59:11" /var/log/postgresql.log cat /var/log/postgres.log | pgbadger - # Log prefix with stderr log output pgbadger --prefix '%t [%p]: user=%u,db=%d,client=%h' /pglog/postgresql-2012-08-21* pgbadger --prefix '%m %u@%d %p %r %a : ' /pglog/postgresql.log # Log line prefix with syslog log output pgbadger --prefix 'user=%u,db=%d,client=%h,appname=%a' /pglog/postgresql-2012-08-21* # Use my 8 CPUs to parse my 10GB file faster, much faster pgbadger -j 8 /pglog/postgresql-10.1-main.log Use URI notation for remote log file: pgbadger http://172.12.110.1//var/log/postgresql/postgresql-10.1-main.log pgbadger ftp://username\@172.12.110.14/postgresql-10.1-main.log pgbadger ssh://username\@172.12.110.14:2222//var/log/postgresql/postgresql-10.1-main.log* You can use together a local PostgreSQL log and a remote pgbouncer log file to parse: pgbadger /var/log/postgresql/postgresql-10.1-main.log ssh://username\@172.12.110.14/pgbouncer.log Generate Tsung sessions XML file with select queries only: pgbadger -S -o sessions.tsung --prefix '%t [%p]: user=%u,db=%d ' /pglog/postgresql-10.1.log Reporting errors every week by cron job: 30 23 * * 1 /usr/bin/pgbadger -q -w /var/log/postgresql.log -o /var/reports/pg_errors.html Generate report every week using incremental behavior: 0 4 * * 1 /usr/bin/pgbadger -q `find /var/log/ -mtime -7 -name "postgresql.log*"` -o /var/reports/pg_errors-`date +\\%F`.html -l /var/reports/pgbadger_incremental_file.dat This supposes that your log file and HTML report are also rotated every week. Or better, use the auto-generated incremental reports: 0 4 * * * /usr/bin/pgbadger -I -q /var/log/postgresql/postgresql.log.1 -O /var/www/pg_reports/ will generate a report per day and per week. In incremental mode, you can also specify the number of week to keep in the reports: /usr/bin/pgbadger --retention 2 -I -q /var/log/postgresql/postgresql.log.1 -O /var/www/pg_reports/ If you have a pg_dump at 23:00 and 13:00 each day during half an hour, you can use pgBadger as follow to exclude these period from the report: pgbadger --exclude-time "2013-09-.* (23|13):.*" postgresql.log This will help avoid having COPY statements, as generated by pg_dump, on top of the list of slowest queries. You can also use --exclude-appname "pg_dump" to solve this problem in a simpler way. You can also parse journalctl output just as if it was a log file: pgbadger --journalctl 'journalctl -u postgresql-9.5' or worst, call it from a remote host: pgbadger -r 192.168.1.159 --journalctl 'journalctl -u postgresql-9.5' you don't need to specify any log file at command line, but if you have other PostgreSQL log file to parse, you can add them as usual. To rebuild all incremental html reports after, proceed as follow: rm /path/to/reports/*.js rm /path/to/reports/*.css pgbadger -X -I -O /path/to/reports/ --rebuild it will also update all resource files (JS and CSS). Use -E or --explode if the reports were built using this option. pgBadger also support Heroku PostgreSQL logs using logplex format: heroku logs -p postgres | pgbadger -f logplex -o heroku.html - this will stream Heroku PostgreSQL log to pgbadger through stdin. pgBadger can auto detect RDS and cloudwatch PostgreSQL logs using rds format: pgbadger -f rds -o rds_out.html rds.log CloudSQL Postgresql logs it's fairly normal PostgreSQL log but encapsulated in JSON format. It is auto detected too by pgBagder but in case you need to force the log format, use `jsonlog` pgbadger -f jsonlog -o cloudsql_out.html cloudsql.log This is the same than with the jsonlog extension, the json format is different but pgBadger can parse both format. To create a cumulative report over a month use command: pgbadger --month-report 2919-05 /path/to/incremantal/reports/ this will add a link to the month name into the calendar view in incremental reports to look at report for month 2019 May. Use -E or --explode if the reports were built using this option. }; # Note that usage must be terminated by an extra newline # to not break POD documentation at make time. exit 0; } sub gen_database_report { my $db = shift; # Some message have been temporary stored as ERROR but # they are LOG, restore them to the right log level. &restore_log_type_count($db); foreach $outfile (@outfiles) { ($current_out_file, $extens) = &set_output_extension($outfile, $extension, $db); $extens = $dft_extens if ($current_out_file eq '-' && $dft_extens); if ($report_per_database) { &logmsg('LOG', "Ok, generating $extens report for database $db..."); } else { &logmsg('LOG', "Ok, generating $extens report..."); } if ($extens ne 'tsung') { $fh = new IO::File ">$current_out_file"; if (not defined $fh) { localdie("FATAL: can't write to $current_out_file, $!\n"); } if (($extens eq 'text') || ($extens eq 'txt')) { if ($error_only) { &dump_error_as_text($db); } else { &dump_as_text($db); } } elsif ($extens eq 'json') { if ($error_only) { &dump_error_as_json($db); } else { &dump_as_json($db); } } elsif ($extens eq 'binary') { &dump_as_binary($fh, $db); } else { &dump_as_html('.', $db); } $fh->close; } else { # Open filehandle $fh = new IO::File ">>$current_out_file"; if (not defined $fh) { localdie("FATAL: can't write to $current_out_file, $!\n"); } print $fh "\n\n"; $fh->close(); } } } #### # Function used to validate the possibility to use process on the given # file. Returns 1 when all multiprocess can be used, 0 when we can not # use multiprocess on a single file (remore file) and -1 when parallel # process can not be used too (binary mode). #### sub confirm_multiprocess { my $file = shift; if ($remote_host || $file =~ /^(http[s]*|ftp[s]*|ssh):/) { # Disable multi process when using ssh to parse remote log if ($queue_size > 1) { &logmsg('DEBUG', "parallel processing is not supported with remote files."); } return 0; } # Disable parallel processing in binary mode if ($format eq 'binary') { if (($queue_size > 1) || ($job_per_file > 1)) { &logmsg('DEBUG', "parallel processing is not supported with binary format.") if (!$quiet); } return -1; } return 1; } sub set_ssh_command { my ($ssh_cmd, $rhost) = @_; #http://www.domain.com:8080/file.log:format #ftp://www.domain.com/file.log:format #ssh:root@domain.com:file.log:format # Extract format part my $fmt = ''; if ($rhost =~ s/\|([a-z2]+)$//) { $fmt = $1; } $ssh_cmd = $ssh_bin || 'ssh'; $ssh_cmd .= " -p $ssh_port" if ($ssh_port); $ssh_cmd .= " -i $ssh_identity" if ($ssh_identity); $ssh_cmd .= " $ssh_options" if ($ssh_options); if ($ssh_user && $rhost !~ /\@/) { $ssh_cmd .= " $ssh_user\@$rhost"; } else { $ssh_cmd .= " $rhost"; } if (wantarray()) { return ($ssh_cmd, $fmt); } else { return $ssh_cmd; } } sub set_file_list { my $file = shift; my @lfiles = (); my $file_orig = $file; my $fmt = ''; # Remove log format from log file if any if ($file =~ s/(:(?:stderr|csv|syslog|pgbouncer|jsonlog|logplex|rds|redshift)\d*)$//i) { $fmt = $1; } # Store the journalctl command as is we will create a pipe from this command if ( $journalctl_cmd && ($file =~ m/\Q$journalctl_cmd\E/) ) { push(@lfiles, $file_orig); $empty_files = 0; } # Store the journalctl command as is we will create a pipe from this command elsif ( $log_command && ($file =~ m/\Q$log_command\E/) ) { push(@lfiles, $file_orig); $empty_files = 0; } # Input from stdin elsif ($file eq '-') { if ($logfile_list) { localdie("FATAL: stdin input - can not be used with logfile list (-L).\n"); } push(@lfiles, $file_orig); $empty_files = 0; } # For input from other sources than stdin else { # if it is not a remote file store the file if it is not an empty file if (!$remote_host && $file !~ /^(http[s]*|[s]*ftp|ssh):/i) { localdie("FATAL: logfile \"$file\" must exist!\n") if (not -f $file); if (-z $file) { print "WARNING: file $file is empty\n" if (!$quiet); next; } push(@lfiles, $file_orig); $empty_files = 0; } # if this is a remote file extract the list of files using a ssh command elsif ($file !~ /^(http[s]*|[s]*ftp):/i) { # Get files from remote host if ($file !~ /^ssh:/) { my($filename, $dirs, $suffix) = fileparse($file); &logmsg('DEBUG', "Looking for remote filename using command: $remote_command \"ls '$dirs'$filename\""); my @rfiles = `$remote_command "ls '$dirs'$filename"`; foreach my $f (@rfiles) { push(@lfiles, "$f$fmt"); } } elsif ($file =~ m#^ssh://([^\/]+)/(.*)#) { my $host_info = $1; my $file = $2; my $ssh = $ssh_command || 'ssh'; if ($host_info =~ s/:(\d+)$//) { $host_info = "-p $1 $host_info"; } my($filename, $dirs, $suffix) = fileparse($file); &logmsg('DEBUG', "Looking for remote filename using command: $ssh $host_info \"ls '$dirs'$filename\""); my @rfiles = `$ssh $host_info "ls '$dirs'$filename"`; $dirs = '' if ( $filename ne '' ); #ls returns relative paths for an directory but absolute ones for a file or filename pattern foreach my $f (@rfiles) { $host_info =~ s/-p (\d+) (.*)/$2:$1/; push(@lfiles, "ssh://$host_info/$dirs$f$fmt"); } } $empty_files = 0; } # this is remote file extracted using http/ftp protocol, store the uri else { push(@lfiles, $file_orig); $empty_files = 0; } } return @lfiles; } # Get inbounds of query times histogram sub get_hist_inbound { my ($duration, @histogram) = @_; for (my $i = 0; $i <= $#histogram; $i++) { return $histogram[$i-1] if ($histogram[$i] > $duration); } return -1; } # Compile custom log line prefix prefix sub set_parser_regex { my $fmt = shift; @prefix_params = (); @prefix_q_params = (); if ($fmt eq 'pgbouncer') { $pgbouncer_log_format = qr/^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\.\d+(?: [A-Z\+\-\d]{3,6})? (\d+) ([^\s]+) (.\-0x[0-9a-f\.]*): ([0-9a-zA-Z\_\[\]\-\.]*)\/([0-9a-zA-Z\_\[\]\-\.]*)\@([a-zA-Z0-9\-\.]+|\[local\]|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[0-9a-fA-F:]+)(?:\(\d+\))??[:\d]* (.*)/; @pgb_prefix_params = ('t_timestamp', 't_pid', 't_loglevel', 't_session_id', 't_dbname', 't_dbuser', 't_client', 't_query'); $pgbouncer_log_parse1 = qr/^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\.\d+(?: [A-Z\+\-\d]{3,6})? (\d+) ([^\s]+) (.\-0x[0-9a-f\.]+|[Ss]tats): (.*)/; @pgb_prefix_parse1 = ('t_timestamp', 't_pid', 't_loglevel', 't_session_id', 't_query'); $pgbouncer_log_parse2 = qr/^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+(?: [A-Z\+\-\d]{3,6})? \d+ [^\s]+ .\-0x[0-9a-f\.]*: ([0-9a-zA-Z\_\[\]\-\.]*)\/([0-9a-zA-Z\_\[\]\-\.]*)\@([a-zA-Z0-9\-\.]+|\[local\]|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[0-9a-fA-F:]+)?(?:\(\d+\))?(?:\(\d+\))?[:\d]* (.*)/; @pgb_prefix_parse2 = ('t_dbname', 't_dbuser', 't_client', 't_query'); $pgbouncer_log_parse3 = qr/^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\.\d+(?: [A-Z\+\-\d]{3,6})? (\d+) ([^\s]+) ([^:]+: .*)/; @pgb_prefix_parse3 = ('t_timestamp', 't_pid', 't_loglevel', 't_query'); } elsif ($fmt eq 'pgbouncer1') { $pgbouncer_log_format = qr/^(...)\s+(\d+)\s(\d+):(\d+):(\d+)(?:\s[^\s]+)?\s([^\s]+)\s([^\s\[]+)\[(\d+)\]: (.\-0x[0-9a-f\.]*): ([0-9a-zA-Z\_\[\]\-\.]*)\/([0-9a-zA-Z\_\[\]\-\.]*)\@([a-zA-Z0-9\-\.]+|\[local\]|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[0-9a-fA-F:]+)?(?:\(\d+\))?[:\d]* (.*)/; @pgb_prefix_params = ('t_year', 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_session_id', 't_dbname', 't_dbuser', 't_client', 't_query'); $pgbouncer_log_parse1 = qr/^(...)\s+(\d+)\s(\d+):(\d+):(\d+)(?:\s[^\s]+)?\s([^\s]+)\s([^\s\[]+)\[(\d+)\]: (.\-0x[0-9a-f\.]+|[Ss]tats): (.*)/; @pgb_prefix_parse1 = ('t_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_session_id', 't_query'); $pgbouncer_log_parse2 = qr/^...\s+\d+\s\d+:\d+:\d+(?:\s[^\s]+)?\s[^\s]+\s[^\s\[]+\[\d+\]: .\-0x[0-9a-f\.]*: ([0-9a-zA-Z\_\[\]\-\.]*)\/([0-9a-zA-Z\_\[\]\-\.]*)\@([a-zA-Z0-9\-\.]+|\[local\]|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[0-9a-fA-F:]+)?(?:\(\d+\))?[:\d]* (.*)/; @pgb_prefix_parse2 = ('t_dbname', 't_dbuser', 't_client', 't_query'); $pgbouncer_log_parse3 = qr/^(...)\s+(\d+)\s(\d+):(\d+):(\d+)(?:\s[^\s]+)?\s([^\s]+)\s([^\s\[]+)\[(\d+)\]: ([^:]+: .*)/; @pgb_prefix_parse3 = ('t_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_query'); } elsif ($fmt eq 'pgbouncer2') { $pgbouncer_log_format = qr/^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(?:.[^\s]+)?\s([^\s]+)\s(?:[^\s]+\s)?(?:[^\s]+\s)?([^\s\[]+)\[(\d+)\]: (.\-0x[0-9a-f\.]*): ([0-9a-zA-Z\_\[\]\-\.]*)\/([0-9a-zA-Z\_\[\]\-\.]*)\@([a-zA-Z0-9\-\.]+|\[local\]|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[0-9a-fA-F:]+)?(?:\(\d+\))?[:\d]* (.*)/; @pgb_prefix_params = ('t_year', 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_session_id', 't_dbname', 't_dbuser', 't_client', 't_query'); $pgbouncer_log_parse1 = qr/^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(?:.[^\s]+)?\s([^\s]+)\s(?:[^\s]+\s)?(?:[^\s]+\s)?([^\s\[]+)\[(\d+)\]: (.\-0x[0-9a-f\.]+|[Ss]tats): (.*)/; @pgb_prefix_parse1 = ('t_year', 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_session_id', 't_query'); $pgbouncer_log_parse2 = qr/^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:.[^\s]+)?\s[^\s]+\s(?:[^\s]+\s)?(?:[^\s]+\s)?[^\s\[]+\[\d+\]: .\-0x[0-9a-f\.]*: ([0-9a-zA-Z\_\[\]\-\.]*)\/([0-9a-zA-Z\_\[\]\-\.]*)\@([a-zA-Z0-9\-\.]+|\[local\]|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[0-9a-fA-F:]+)?(?:\(\d+\))?[:\d]* (.*)/; @pgb_prefix_parse2 = ('t_dbname', 't_dbuser', 't_client', 't_query'); $pgbouncer_log_parse3 = qr/^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(?:.[^\s]+)?\s([^\s]+)\s(?:[^\s]+\s)?(?:[^\s]+\s)?([^\s\[]+)\[(\d+)\]: ([^:]+: .*)/; @pgb_prefix_parse3 = ('t_year', 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_query'); } elsif ($fmt eq 'pgbouncer3') { $pgbouncer_log_format = qr/^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\.\d+(?: [A-Z\+\-\d]{3,6})? \[(\d+)\] ([^\s]+) (.\-0x[0-9a-f\.]*): ([0-9a-zA-Z\_\[\]\-\.]*)\/([0-9a-zA-Z\_\[\]\-\.]*)\@([a-zA-Z0-9\-\.]+|\[local\]|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[0-9a-fA-F:]+)(?:\(\d+\))??[:\d]* (.*)/; @pgb_prefix_params = ('t_timestamp', 't_pid', 't_loglevel', 't_session_id', 't_dbname', 't_dbuser', 't_client', 't_query'); $pgbouncer_log_parse1 = qr/^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\.\d+(?: [A-Z\+\-\d]{3,6})? \[(\d+)\] ([^\s]+) (.\-0x[0-9a-f\.]+|[Ss]tats): (.*)/; @pgb_prefix_parse1 = ('t_timestamp', 't_pid', 't_loglevel', 't_session_id', 't_query'); $pgbouncer_log_parse2 = qr/^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+(?: [A-Z\+\-\d]{3,6})? \[\d+\] [^\s]+ .\-0x[0-9a-f\.]*: ([0-9a-zA-Z\_\[\]\-\.]*)\/([0-9a-zA-Z\_\[\]\-\.]*)\@([a-zA-Z0-9\-\.]+|\[local\]|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[0-9a-fA-F:]+)?(?:\(\d+\))?(?:\(\d+\))?[:\d]* (.*)/; @pgb_prefix_parse2 = ('t_dbname', 't_dbuser', 't_client', 't_query'); $pgbouncer_log_parse3 = qr/^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\.\d+(?: [A-Z\+\-\d]{3,6})? \[(\d+)\] ([^\s]+) ([^:]+: .*)/; @pgb_prefix_parse3 = ('t_timestamp', 't_pid', 't_loglevel', 't_query'); } elsif ($log_line_prefix) { # Build parameters name that will be extracted from the prefix regexp my %res = &build_log_line_prefix_regex($log_line_prefix); my $llp = $res{'llp'}; @prefix_params = @{ $res{'param_list'} }; $q_prefix = $res{'q_prefix'}; @prefix_q_params = @{ $res{'q_param_list'} }; if ($fmt eq 'syslog') { $llp = '^(...)\s+(\d+)\s(\d+):(\d+):(\d+)(?:\s[^\s]+)?\s([^\s]+)\s([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?\s\[(\d+)(?:\-\d+)?\]\s*' . $llp . '\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(?:[0-9A-Z]{5}:\s+)?(.*)'; $compiled_prefix = qr/$llp/; unshift(@prefix_params, 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_session_line'); push(@prefix_params, 't_loglevel', 't_query'); $other_syslog_line = qr/^(...)\s+(\d+)\s(\d+):(\d+):(\d+)(?:\s[^\s]+)?\s([^\s]+)\s([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?\s\[(\d+)(?:\-\d+)?\]\s*(.*)/; } elsif ($fmt eq 'syslog2') { $fmt = 'syslog'; $llp = '^(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)(?:.[^\s]+)?\s([^\s]+)\s(?:[^\s]+\s)?(?:[^\s]+\s)?([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?(?:\s\[(\d+)(?:\-\d+)?\])?\s*' . $llp . '\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(?:[0-9A-Z]{5}:\s+)?(.*)'; $compiled_prefix = qr/$llp/; unshift(@prefix_params, 't_year', 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_session_line'); push(@prefix_params, 't_loglevel', 't_query'); $other_syslog_line = qr/^(\d+-\d+)-(\d+)T(\d+):(\d+):(\d+)(?:.[^\s]+)?\s([^\s]+)\s(?:[^\s]+\s)?(?:[^\s]+\s)?([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?(?:\s\[(\d+)(?:\-\d+)?\])?\s*(.*)/; } elsif ($fmt eq 'logplex') { # The output format of the heroku pg logs is as follows: timestamp app[dyno]: message $llp = '^(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)(?:\.\d+)?[+\-]\d{2}:\d{2}\s+(?:[^\s]+)?\s*app\[postgres\.(\d+)\][:]?\s+\[([^\]]+)\]\s+\[\d+\-\d+\]\s+' . $llp . '\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(.*)'; $compiled_prefix = qr/$llp/; unshift(@prefix_params, 't_year', 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_pid', 't_dbname'); push(@prefix_params, 't_loglevel', 't_query'); $other_syslog_line = qr/^(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)[+\-]\d{2}:\d{2}\s+(?:[^\s]+)?\s*app\[postgres\.\d+\][:]?\s+\[([^\]]+)\]\s+\[(\d+)\-(\d+)\]\s+(.*)/; } elsif ($fmt =~ /^rds$/) { # The output format of the RDS pg logs is as follows: %t:%r:%u@%d:[%p]: message # With Cloudwatch it is prefixed with an other timestamp $llp = '^' . $llp . '(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(.*)'; $compiled_prefix = qr/$llp/; @prefix_params = ('t_timestamp', 't_client', 't_dbuser', 't_dbname', 't_pid', 't_loglevel', 't_query'); } elsif ($fmt =~ /^redshift$/) { # Look at format of the AWS redshift pg logs, for example: # '2020-03-07T16:09:43Z UTC [ db=dev user=rdsdb pid=16929 userid=1 xid=7382 ]' $llp = '^' . $llp . '(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(.*)'; $compiled_prefix = qr/$llp/; @prefix_params = ('t_timestamp', 't_dbname', 't_dbuser', 't_pid', 't_loglevel', 't_query'); } elsif ($fmt eq 'stderr' || $fmt eq 'default' || $fmt eq 'jsonlog') { $fmt = 'stderr' if ($fmt ne 'jsonlog'); $llp = '^' . $llp . '\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(?:[0-9A-Z]{5}:\s+)?(.*)'; $compiled_prefix = qr/$llp/; push(@prefix_params, 't_loglevel', 't_query'); } } elsif ($fmt eq 'syslog') { $compiled_prefix = qr/^(...)\s+(\d+)\s(\d+):(\d+):(\d+)(?:\s[^\s]+)?\s([^\s]+)\s([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?\s\[(\d+)(?:\-\d+)?\]\s*(.*?)\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(?:[0-9A-Z]{5}:\s+)?(.*)/; push(@prefix_params, 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_session_line', 't_logprefix', 't_loglevel', 't_query'); $other_syslog_line = qr/^(...)\s+(\d+)\s(\d+):(\d+):(\d+)(?:\s[^\s]+)?\s([^\s]+)\s([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?\s\[(\d+)(?:\-\d+)?\]\s*(.*)/; } elsif ($fmt eq 'syslog2') { $fmt = 'syslog'; $compiled_prefix = qr/^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(?:.[^\s]+)?\s([^\s]+)\s(?:[^\s]+\s)?(?:[^\s]+\s)?([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?(?:\s\[(\d+)(?:\-\d+)?\])?\s*(.*?)\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(?:[0-9A-Z]{5}:\s+)?(.*)/; push(@prefix_params, 't_year', 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_session_line', 't_logprefix', 't_loglevel', 't_query'); $other_syslog_line = qr/^(\d+-\d+)-(\d+)T(\d+):(\d+):(\d+)(?:.[^\s]+)?\s([^\s]+)\s(?:[^\s]+\s)?(?:[^\s]+\s)?([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?(?:\s\[(\d+)(?:\-\d+)?\])?\s*(.*)/; } elsif ($fmt eq 'logplex') { # The output format of the heroku pg logs is as follows: timestamp app[dyno]: message $compiled_prefix = qr/^(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)(?:\.\d+)?[+\-]\d{2}:\d{2}\s+(?:[^\s]+)?\s*app\[postgres\.(\d+)\][:]?\s+\[([^\]]+)\]\s+\[\d+\-\d+\]\s+(.*?)\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(.*)/; unshift(@prefix_params, 't_year', 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_pid', 't_dbname'); push(@prefix_params, 't_logprefix', 't_loglevel', 't_query'); $other_syslog_line = qr/^(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)[+\-]\d{2}:\d{2}\s+(?:[^\s]+)?\s*app\[(postgres)\.(\d+)\][:]?\s+\[([^\]]+)\]\s+\[\d+\-\d+\]\s+(.*)/; } elsif ($fmt eq 'rds') { # The output format of the RDS pg logs is as follows: %t:%r:%u@%d:[%p]: message # With Cloudwatch it is prefixed with an other timestamp $compiled_prefix = qr/^(?:\d+-\d+-\d+T\d+:\d+:\d+\.\d+Z)?\s*(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)\s*[^:]*:([^:]*):([^\@]*)\@([^:]*):\[(\d+)\]:(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(.*)/; unshift(@prefix_params, 't_year', 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_client', 't_dbuser', 't_dbname', 't_pid', 't_loglevel', 't_query'); } elsif ($fmt eq 'redshift') { # Look at format of the AWS redshift pg logs, for example: # '2020-03-07T16:09:43Z UTC [ db=dev user=rdsdb pid=16929 userid=1 xid=7382 ]' $compiled_prefix = qr/^'(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)Z [^\s]+ \[ db=(.*?) user=(.*?) pid=(\d+) userid=\d+ xid=(?:.*?) \]' (LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(.*)/; unshift(@prefix_params, 't_year', 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_dbname', 't_dbuser', 't_pid', 't_loglevel', 't_query'); } elsif ($fmt eq 'stderr') { $compiled_prefix = qr/^(\d{10}\.\d{3}|\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})[\.\d]*(?: [A-Z\+\-\d]{3,6})?\s\[([0-9a-f\.]+)\][:]*\s(?:\[\d+\-\d+\])?\s*(.*?)\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(?:[0-9A-Z]{5}:\s+)?(.*)/; push(@prefix_params, 't_timestamp', 't_pid', 't_logprefix', 't_loglevel', 't_query'); } elsif ($fmt eq 'default') { $fmt = 'stderr'; $compiled_prefix = qr/^(\d{10}\.\d{3}|\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})[\.\d]*(?: [A-Z\+\-\d]{3,6})?\s\[([0-9a-f\.]+)\][:]*\s(.*?)\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(?:[0-9A-Z]{5}:\s+)?(.*)/; push(@prefix_params, 't_timestamp', 't_pid', 't_logprefix', 't_loglevel', 't_query'); } return $fmt; } sub check_regex { my ($pattern, $varname) = @_; eval {m/$pattern/i;}; if ($@) { localdie("FATAL: '$varname' invalid regex '$pattern', $!\n"); } } sub build_incremental_reports { my @build_directories = @_; my $destdir = $html_outdir || $outdir; my %weeks_directories = (); foreach my $bpath (sort @build_directories) { my $binpath = ''; $binpath = $1 if ($bpath =~ s/^(.*\/)(\d+\-\d+\-\d+)$/$2/); &logmsg('DEBUG', "Building incremental report for " . $bpath); $incr_date = $bpath; $last_incr_date = $bpath; # Set the path to binary files $bpath =~ s/\-/\//g; # Get the week number following the date $incr_date =~ /^(\d+)-(\d+)\-(\d+)$/; my $wn = &get_week_number($1, $2, $3); if (!$noweekreport) { if ($rebuild || !exists $weeks_directories{$wn}) { $weeks_directories{$wn}{dir} = "$1-$2"; $weeks_directories{$wn}{prefix} = $binpath if ($binpath); } } # First clear previous stored statistics &init_stats_vars(); # Load all data gathered by all the different processes $destdir = $binpath || $outdir; if (opendir(DIR, "$destdir/$bpath")) { my @mfiles = grep { !/^\./ && ($_ =~ /\.bin$/) } readdir(DIR); closedir DIR; foreach my $f (@mfiles) { my $fht = new IO::File; $fht->open("< $destdir/$bpath/$f") or localdie("FATAL: can't open file $destdir/$bpath/$f, $!\n"); load_stats($fht); $fht->close(); } } $destdir = $html_outdir || $outdir; foreach my $db (sort keys %DBLIST) { #next if ($#dbname >= 0 and !grep(/^$db$/i, @dbname)); my $tmp_dir = "$destdir/$db"; $tmp_dir = $destdir if (!$report_per_database); &logmsg('LOG', "Ok, generating HTML daily report into $tmp_dir/$bpath/..."); # set path and create subdirectories mkdir("$tmp_dir") if (!-d "$tmp_dir"); if ($bpath =~ m#^(\d+)/(\d+)/(\d+)#) { mkdir("$tmp_dir/$1") if (!-d "$tmp_dir/$1"); mkdir("$tmp_dir/$1/$2") if (!-d "$tmp_dir/$1/$2"); mkdir("$tmp_dir/$1/$2/$3") if (!-d "$tmp_dir/$1/$2/$3"); } else { &logmsg('ERROR', "invalid path: $bpath, can not create subdirectories."); } $fh = new IO::File ">$tmp_dir/$bpath/$current_out_file"; if (not defined $fh) { localdie("FATAL: can't write to $tmp_dir/$bpath/$current_out_file, $!\n"); } &dump_as_html('../../..', $db); $fh->close; } } # Build a report per week foreach my $wn (sort { $a <=> $b } keys %weeks_directories) { &init_stats_vars(); # Get all days of the current week my $getwnb = $wn; $getwnb-- if (!$iso_week_number); my @wdays = &get_wdays_per_month($getwnb, $weeks_directories{$wn}{dir}); my $binpath = ''; $binpath = $weeks_directories{$wn}{prefix} if (defined $weeks_directories{$wn}{prefix}); my $wdir = ''; # Load data per day foreach my $bpath (@wdays) { $incr_date = $bpath; $bpath =~ s/\-/\//g; $incr_date =~ /^(\d+)\-(\d+)\-(\d+)$/; $wdir = "$1/week-$wn"; $destdir = $binpath || $outdir; # Load all data gathered by all the differents processes if (-e "$destdir/$bpath") { unless(opendir(DIR, "$destdir/$bpath")) { localdie("FATAL: can't opendir $destdir/$bpath: $!\n"); } my @mfiles = grep { !/^\./ && ($_ =~ /\.bin$/) } readdir(DIR); closedir DIR; foreach my $f (@mfiles) { my $fht = new IO::File; $fht->open("< $destdir/$bpath/$f") or localdie("FATAL: can't open file $destdir/$bpath/$f, $!\n"); load_stats($fht); $fht->close(); } } } $destdir = $html_outdir || $outdir; foreach my $db (sort keys %DBLIST) { #next if ($#dbname >= 0 and !grep(/^$db$/i, @dbname)); my $tmp_dir = "$destdir/$db"; $tmp_dir = $destdir if (!$report_per_database); &logmsg('LOG', "Ok, generating HTML weekly report into $tmp_dir/$wdir/..."); mkdir("$tmp_dir") if (!-d "$tmp_dir"); my $path = $tmp_dir; foreach my $d (split('/', $wdir)) { mkdir("$path/$d") if (!-d "$path/$d"); $path .= "/$d"; } $fh = new IO::File ">$tmp_dir/$wdir/$current_out_file"; if (not defined $fh) { localdie("FATAL: can't write to $tmp_dir/$wdir/$current_out_file, $!\n"); } &dump_as_html('../..', $db); $fh->close; } } # Generate global index to access incremental reports &build_global_index(); } sub build_month_reports { my ($month_path, @build_directories) = @_; # First clear previous stored statistics &init_stats_vars(); foreach my $bpath (sort @build_directories) { $incr_date = $bpath; $last_incr_date = $bpath; # Set the path to binary files $bpath =~ s/\-/\//g; # Get the week number following the date $incr_date =~ /^(\d+)-(\d+)\-(\d+)$/; &logmsg('DEBUG', "reading month statistics from $outdir/$bpath"); # Load all data gathered by all the different processes unless(opendir(DIR, "$outdir/$bpath")) { localdie("FATAL: can't opendir $outdir/$bpath: $!\n"); } my @mfiles = grep { !/^\./ && ($_ =~ /\.bin$/) } readdir(DIR); closedir DIR; foreach my $f (@mfiles) { my $fht = new IO::File; $fht->open("< $outdir/$bpath/$f") or localdie("FATAL: can't open file $outdir/$bpath/$f, $!\n"); load_stats($fht); $fht->close(); } } my $dest_dir = $html_outdir || $outdir; foreach my $db (sort keys %DBLIST) { my $tmp_dir = "$dest_dir/$db"; $tmp_dir = $dest_dir if (!$report_per_database); &logmsg('LOG', "Ok, generating HTML monthly report into $tmp_dir/$month_path/index.html"); mkdir("$tmp_dir") if (!-d "$tmp_dir"); my $path = $tmp_dir; foreach my $d (split('/', $month_path)) { mkdir("$path/$d") if (!-d "$path/$d"); $path .= "/$d"; } $fh = new IO::File ">$tmp_dir/$month_path/index.html"; if (not defined $fh) { localdie("FATAL: can't write to $tmp_dir/$month_path/index.html, $!\n"); } &dump_as_html('../..', $db); $fh->close; } # Generate global index to access incremental reports &build_global_index(); } sub build_day_reports { my ($day_path, @build_directories) = @_; # First clear previous stored statistics &init_stats_vars(); foreach my $bpath (sort @build_directories) { $incr_date = $bpath; $last_incr_date = $bpath; # Set the path to binary files $bpath =~ s/\-/\//g; # Get the week number following the date $incr_date =~ /^(\d+)-(\d+)\-(\d+)$/; &logmsg('DEBUG', "reading month statistics from $outdir/$bpath"); # Load all data gathered by all the different processes unless(opendir(DIR, "$outdir/$bpath")) { localdie("FATAL: can't opendir $outdir/$bpath: $!\n"); } my @mfiles = grep { !/^\./ && ($_ =~ /\.bin$/) } readdir(DIR); closedir DIR; foreach my $f (@mfiles) { my $fht = new IO::File; $fht->open("< $outdir/$bpath/$f") or localdie("FATAL: can't open file $outdir/$bpath/$f, $!\n"); load_stats($fht); $fht->close(); } } my $dest_dir = $html_outdir || $outdir; foreach my $db (sort keys %DBLIST) { my $tmp_dir = "$dest_dir/$db"; $tmp_dir = $dest_dir if (!$report_per_database); &logmsg('LOG', "Ok, generating HTML daily report into $tmp_dir/$day_path/index.html"); mkdir("$tmp_dir") if (!-d "$tmp_dir"); my $path = $tmp_dir; foreach my $d (split('/', $day_path)) { mkdir("$path/$d") if (!-d "$path/$d"); $path .= "/$d"; } $fh = new IO::File ">$tmp_dir/$day_path/index.html"; if (not defined $fh) { localdie("FATAL: can't write to $tmp_dir/$day_path/index.html, $!\n"); } &dump_as_html('../..', $db); $fh->close; } # Generate global index to access incremental reports &build_global_index(); } sub build_global_index { &logmsg('LOG', "Ok, generating global index to access incremental reports..."); my $dest_dir = $html_outdir || $outdir; # Get database directories unless(opendir(DIR, "$dest_dir")) { localdie("FATAL: can't opendir $dest_dir: $!\n"); } my @dbs = grep { !/^\./ && !/^\d{4}$/ && -d "$dest_dir/$_" } readdir(DIR); closedir DIR; @dbs = ($DBALL) if (!$report_per_database); foreach my $db (@dbs) { #next if ($#dbname >= 0 and !grep(/^$db$/i, @dbname)); my $tmp_dir = "$dest_dir/$db"; $tmp_dir = $dest_dir if (!$report_per_database); &logmsg('DEBUG', "writing global index into $tmp_dir/index.html"); $fh = new IO::File ">$tmp_dir/index.html"; if (not defined $fh) { localdie("FATAL: can't write to $tmp_dir/index.html, $!\n"); } my $date = localtime(time); my @tmpjscode = @jscode; my $path_prefix = '.'; $path_prefix = '..' if ($report_per_database); for (my $i = 0; $i <= $#tmpjscode; $i++) { $tmpjscode[$i] =~ s/EDIT_URI/$path_prefix/; } my $local_title = 'Global Index on incremental reports'; if ($report_title) { $local_title = 'Global Index - ' . $report_title; } print $fh qq{ pgBadger :: $local_title @tmpjscode
}; # get year directories unless(opendir(DIR, "$tmp_dir")) { localdie("FATAL: can't opendir $tmp_dir: $!\n"); } my @dyears = grep { !/^\./ && /^\d{4}$/ } readdir(DIR); closedir DIR; foreach my $y (sort { $b <=> $a } @dyears) { print $fh qq{

Year $y

}; # foreach year directory look for week directories unless(opendir(DIR, "$tmp_dir/$y")) { localdie("FATAL: can't opendir $tmp_dir/$y: $!\n"); } my @ymonths = grep { /^\d{2}$/ } readdir(DIR); closedir DIR; my $i = 1; foreach my $m (sort {$a <=> $b } @ymonths) { print $fh "\n"; print $fh "\n\n" if ( ($i%4) == 0 ); $i++; } print $fh qq{
", &get_calendar($db, $y, $m), "
}; } print $fh qq{
 ^ 
}; $fh->close; } } sub cleanup_directory { my ($dir, $remove_dir) = @_; unless(opendir(DIR, "$dir")) { localdie("FATAL: can't opendir $dir: $!\n"); } my @todel = grep { !/^\./ } readdir(DIR); closedir DIR; map { unlink("$dir/$_"); } @todel; rmdir("$dir") if ($remove_dir); } sub write_resources { # Write resource file to report directory or return resources in and array of lines my $rscfh; my @contents = (); my $endfile = ''; my $file = ''; my $major_version = $VERSION; $major_version =~ s/\..*//; my $rscdir = $html_outdir || $outdir; while (my $l = ) { last if ($l =~ /^__END__$/); if ($l =~ /^WRFILE: ([^\s]+)/) { $file = $1; if (!$extra_files) { if ($#contents > 0) { push(@contents, $endfile); } if ($file =~ /\.css$/i) { push(@contents, ""; } elsif ($file =~ /\.js$/i) { push(@contents, ""; } next; } $rscfh->close() if (defined $rscfh); if ($file =~ /\.css$/i) { push(@contents, "\n"); } elsif ($file =~ /\.js$/i) { push(@contents, "\n"); } if ($extra_files) { if (!-e "$rscdir/$major_version") { mkdir("$rscdir/$major_version"); } if (!-e "$rscdir/$major_version/$file") { $rscfh = new IO::File ">$rscdir/$major_version/$file"; localdie("FATAL: can't write file $rscdir/$major_version/$file\n") if (not defined $rscfh); } } next; } if (!$extra_files) { push(@contents, $l); } else { $rscfh->print($l) if (defined $rscfh); } } $rscfh->close() if (defined $rscfh); # Return __DATA__ content if --extra-files is not used # or HTML links to resources files if (!$extra_files) { push(@contents, $endfile); } return @contents; } sub sort_by_week { my $curr = shift; my $next = shift; $a =~ /week\-(\d+)/; $curr = $1; $b =~ /week\-(\d+)/; $next = $1; return $next <=> $curr; } sub init_stats_vars { # Empty where statistics are stored %overall_stat = (); %pgb_overall_stat = (); %overall_checkpoint = (); %top_slowest = (); %top_tempfile_info = (); %top_cancelled_info = (); %top_locked_info = (); %normalyzed_info = (); %error_info = (); %pgb_error_info = (); %pgb_pool_info = (); %logs_type = (); %errors_code = (); %per_minute_info = (); %pgb_per_minute_info = (); %lock_info = (); %tempfile_info = (); %cancelled_info = (); %connection_info = (); %pgb_connection_info = (); %database_info = (); %application_info = (); %session_info = (); %pgb_session_info = (); %conn_received = (); %checkpoint_info = (); %autovacuum_info = (); %autoanalyze_info = (); @graph_values = (); %cur_info = (); $nlines = 0; %tsung_session = (); } #### # Main function called per each parser process #### sub multiprocess_progressbar { my $totalsize = shift; &logmsg('DEBUG', "Starting progressbar writer process"); $0 = 'pgbadger logger' if (!$disable_process_title); # Terminate the process when we haven't read the complete file but must exit local $SIG{USR1} = sub { print STDERR "\n"; exit 1; }; my $timeout = 3; my $cursize = 0; my $nqueries = 0; my $nerrors = 0; my $fmt = 'stderr'; $pipe->reader(); while (my $r = <$pipe>) { chomp($r); my @infos = split(/\s+/, $r); last if ($infos[0] eq 'QUIT'); $cursize += $infos[0]; $nqueries += $infos[1]; $nerrors += $infos[2]; $fmt = $infos[3] if ($#infos == 3); $cursize = $totalsize if ($totalsize > 0 && $cursize > $totalsize); print STDERR &progress_bar($cursize, $totalsize, 25, '=', $nqueries, $nerrors, $fmt); } print STDERR "\n"; exit 0; } sub update_progress_bar { my ($tmpoutfile, $nlines, $stop_offset, $totalsize, $cursize, $old_queries_count, $old_errors_count, $fmt) = @_; return if (!$progress); if (!$tmpoutfile || not defined $pipe) { if ($progress && (($nlines % $NUMPROGRESS) == 0)) { print STDERR &progress_bar($$cursize, $stop_offset || $totalsize, 25, '=', undef, undef, $fmt); $NUMPROGRESS *= 10 if ($NUMPROGRESS < 10000); } } else { if ($progress && ($nlines % $NUMPROGRESS) == 0) { $pipe->print("$$cursize " . ($overall_stat{'queries_number'} + $pgb_overall_stat{'errors_number'} - $$old_queries_count) . " " . ($overall_stat{'errors_number'} + $pgb_overall_stat{'errors_number'} - $$old_errors_count) . " $fmt\n"); $$old_queries_count = $overall_stat{'queries_number'} + $pgb_overall_stat{'queries_number'}; $$old_errors_count = $overall_stat{'errors_number'} + $pgb_overall_stat{'errors_number'}; $$cursize = 0; $NUMPROGRESS *= 10 if ($NUMPROGRESS < 10000); } } } sub is_pgbouncer_error { if ($_[0] =~ /(login|connect) failed /) { return 'FATAL'; } return 'LOG'; } sub set_current_db { my $dbn = shift; # Don't collect stats on database if it is excluded return $dbn if (grep(/^$dbn$/i, @exclude_db)); if (!$report_per_database || !$dbn || $dbn eq '[unknown]') { $overall_stat{nlines}{$DBALL}++; return $DBALL } $DBLIST{$dbn} = 1; $overall_stat{nlines}{$dbn}++; return $dbn; } #### # Main function called per each parser process #### sub process_file { my ($logfile, $totalsize, $fmt, $tmpoutfile, $start_offset, $stop_offset, $chunk_pos) = @_; my $old_queries_count = 0; my $old_errors_count = 0; my $getout = 0; my $http_download = ($logfile =~ /^(http[s]*:|[s]*ftp:)/i) ? 1 : 0; $start_offset ||= 0; $0 = 'pgbadger parser' if (!$disable_process_title); &init_stats_vars() if ($tmpoutfile); if (!$remote_host) { &logmsg('DEBUG', "Processing log file: $logfile"); } else { &logmsg('DEBUG', "Processing remote log file: $remote_host:$logfile"); } local $SIG{INT} = sub { print STDERR "Received SIGINT abort parsing...\n"; unlink("$PID_FILE"); $terminate = 1; }; local $SIG{TERM} = sub { print STDERR "Received SIGTERM abort parsing...\n"; unlink("$PID_FILE"); $terminate = 1; }; my $curdate = localtime(time); $pipe->writer() if (defined $pipe); # Syslog does not have year information, so take care of year overlapping my ($gsec, $gmin, $ghour, $gmday, $gmon, $gyear, $gwday, $gyday, $gisdst) = localtime(time); $gyear += 1900; my $CURRENT_DATE = $gyear . sprintf("%02d", $gmon + 1) . sprintf("%02d", $gmday); my $cursize = 0; # Get a filehandle to the log file my $lfile = &get_log_file($logfile, $totalsize); if ($logfile ne '-') { if ($progress && ($getout != 1)) { if (!$tmpoutfile || not defined $pipe) { print STDERR &progress_bar( $cursize, $stop_offset || $totalsize, 25, '=', ($overall_stat{'queries_number'} + $pgb_overall_stat{'queries_number'}), ($overall_stat{'errors_number'}+$pgb_overall_stat{'errors_number'}), $fmt ); } else { $pipe->print("$cursize " . ($overall_stat{'queries_number'} + $pgb_overall_stat{'queries_number'} - $old_queries_count) . " " . ($overall_stat{'errors_number'} + $pgb_overall_stat{'errors_number'} - $old_errors_count) . " $fmt\n"); } } if (!$totalsize && $tmpoutfile) { &dump_as_binary($tmpoutfile); $tmpoutfile->close(); } } # Reset the start position if file is smaller that the current start offset if ($totalsize > -1 && $start_offset > $totalsize) { &logmsg('DEBUG', "Starting offset $start_offset is greater than total size $totalsize for file $logfile"); &logmsg('DEBUG', "Reverting start offset $start_offset to 0 for file $logfile, stoppping offset is " . ($stop_offset || $totalsize)); $start_offset = 0 ; } # Check if the first date in the log are after the last date saved if (($logfile ne '-') && ($fmt ne 'binary') && ($fmt ne 'csv') && !$http_download) { if ($start_offset && !$chunk_pos) { my ($retcode, $msg) = check_file_changed($logfile, $file_size{$logfile}, $fmt, ($fmt =~ /pgbouncer/) ? $pgb_saved_last_line{datetime} : $saved_last_line{datetime}, $start_offset, 1); if ($retcode) { &logmsg('DEBUG', "This file should be parsed from the beginning: $logfile, $msg"); &logmsg('DEBUG', "Reverting start offset $start_offset to 0 for file $logfile, stoppping offset is " . ($stop_offset || $totalsize)); $start_offset = 0; } else { &logmsg('DEBUG', "This log might not be parsed: $logfile, $msg"); } $cursize = $start_offset; } } else { $start_offset = 0; $stop_offset = 0; } # Set some boolean to gain speed my $is_json_log = 0; $is_json_log = 1 if ($fmt =~ /jsonlog/); my $is_syslog = 0; $is_syslog = 1 if ($fmt =~ /syslog|logplex/); if ($stop_offset > 0) { $totalsize = $stop_offset - $start_offset; } my $current_offset = $start_offset || 0; if (!$remote_host) { &logmsg('DEBUG', "Starting reading file \"$logfile\"..."); } else { &logmsg('DEBUG', "Starting reading file \"$remote_host:$logfile\"..."); } # Parse pgbouncer logfile if ($fmt =~ /pgbouncer/) { my $cur_pid = ''; my @matches = (); my $has_exclusion = 0; if ($#exclude_line >= 0) { $has_exclusion = 1; } &logmsg('DEBUG', "Start parsing pgbouncer log at offset $start_offset of file $logfile to " . ($stop_offset || $totalsize)); if ($start_offset) { # Move to the starting offset position in file $lfile->seek($start_offset, 0); } # pgbouncer reports are forced in the postgres report. # There is no per database pgbouncer statitiscs collected my $curdb = set_current_db(); while (my $line = <$lfile>) { # We received a signal last if ($terminate); # Get current size/offset in the log file $cursize += length($line) + (&get_eol_length() - 1); $current_offset += length($line) + (&get_eol_length() - 1); # Replace CR/LF by LF $line =~ s/\r//; # Start to exclude from parsing any desired lines if ($has_exclusion >= 0) { # Log line matches the excluded regex map { next if ($line =~ /$_/is); } @exclude_line; } chomp($line); $nlines++; next if (!$line); &update_progress_bar($tmpoutfile, $nlines, $stop_offset, $totalsize, \$cursize, \$old_queries_count, \$old_errors_count, $fmt); %prefix_vars = (); my $special_format = 0; @matches = ($line =~ $pgbouncer_log_parse1); if ($#matches == -1) { @matches = ($line =~ $pgbouncer_log_parse3); $special_format = 1 if ($#matches >= 0); } if ($#matches >= 0) { # Get all relevant fields extracted through the regexp if (!$special_format) { for (my $i = 0 ; $i <= $#pgb_prefix_parse1 ; $i++) { $prefix_vars{$pgb_prefix_parse1[$i]} = $matches[$i]; } } else { for (my $i = 0 ; $i <= $#pgb_prefix_parse3 ; $i++) { $prefix_vars{$pgb_prefix_parse3[$i]} = $matches[$i]; } } # Get detailled information from timestamp if (!$prefix_vars{'t_month'}) { ($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}) = ($prefix_vars{'t_timestamp'} =~ $time_pattern); $prefix_vars{'t_time'} = "$prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; } else { # Standard syslog format does not have year information, months are # three letters and days are not always with 2 digits. if ($prefix_vars{'t_month'} !~ /\d/) { $prefix_vars{'t_year'} = $gyear; $prefix_vars{'t_day'} = sprintf("%02d", $prefix_vars{'t_day'}); $prefix_vars{'t_month'} = $month_abbr{$prefix_vars{'t_month'}}; # Take care of year overlapping if ("$prefix_vars{'t_year'}$prefix_vars{'t_month'}$prefix_vars{'t_day'}" > $CURRENT_DATE) { $prefix_vars{'t_year'} = substr($CURRENT_DATE, 0, 4) - 1; } } $prefix_vars{'t_time'} = "$prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; $prefix_vars{'t_timestamp'} = "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} $prefix_vars{'t_time'}"; } # Change log level for some relevant messages if ($prefix_vars{'t_loglevel'} !~ $main_error_regex) { $prefix_vars{'t_loglevel'} = is_pgbouncer_error($prefix_vars{'t_query'}); } if ($prefix_vars{'t_session_id'} eq 'Stats') { $prefix_vars{'t_loglevel'} = 'STATS'; $prefix_vars{'t_session_id'} = ''; $prefix_vars{'t_query'} = 'Stats: ' . $prefix_vars{'t_query'}; } # Skip unwanted lines my $res = &skip_unwanted_line(); next if ($res == 1); if ($res == -1) { &update_progress_bar($tmpoutfile, $nlines, $stop_offset, $totalsize, \$cursize, \$old_queries_count, \$old_errors_count, $fmt); $getout = 2; last; } # Jump to the last line parsed if required next if (($incremental || $last_parsed) && !&check_incremental_position($fmt, $prefix_vars{'t_timestamp'}, $line)); # Store the current timestamp of the log line &store_current_timestamp($prefix_vars{'t_timestamp'}, $prefix_vars{'t_pid'}); # Override timestamp when we have to adjust datetime to the log timezone if ($log_timezone) { ($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}) = change_timezone($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}); $prefix_vars{'t_time'} = "$prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; $prefix_vars{'t_timestamp'} = "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} $prefix_vars{'t_time'}"; } # Extract other information from the line @matches = ($line =~ $pgbouncer_log_parse2); if ($#matches >= 0) { for (my $i = 0 ; $i <= $#pgb_prefix_parse2 ; $i++) { $prefix_vars{$pgb_prefix_parse2[$i]} = $matches[$i]; } $prefix_vars{'t_client'} = _gethostbyaddr($prefix_vars{'t_client'}) if ($dns_resolv && $prefix_vars{'t_client'}); } else { # pgBouncer Statistics appears each minutes in the log if ($prefix_vars{'t_query'} =~ /(\d+) req\/s, in (\d+) b\/s, out (\d+) b\/s,query (\d+) us/) { $prefix_vars{'t_loglevel'} = 'STATS'; $prefix_vars{'t_req/s'} = $1; $prefix_vars{'t_inbytes/s'} = $2; $prefix_vars{'t_outbytes/s'} = $3; $prefix_vars{'t_avgduration'} = $4; } elsif ($prefix_vars{'t_query'} =~ /(\d+) xacts\/s, (\d+) queries\/s, in (\d+) B\/s, out (\d+) B\/s, xact (\d+) us, query (\d+) us/) { $prefix_vars{'t_loglevel'} = 'STATS'; $prefix_vars{'t_xact/s'} = $1; $prefix_vars{'t_req/s'} = $2; $prefix_vars{'t_inbytes/s'} = $3; $prefix_vars{'t_outbytes/s'} = $4; $prefix_vars{'t_avgtxduration'} = $5; $prefix_vars{'t_avgduration'} = $6; } } # Check if the log line should be excluded from the report if (&validate_log_line($prefix_vars{'t_pid'})) { $prefix_vars{'t_host'} = 'stderr'; # this unused variable is used to store format information when log format is not syslog # Process the log line &parse_pgbouncer($fmt); } } else { # unknown format &logmsg('DEBUG', "Unknown pgbouncer line format: $line"); } last if (($stop_offset > 0) && ($current_offset >= $stop_offset)); } if ($last_parsed) { $pgb_last_line{current_pos} = $current_offset; } } # Parse PostgreSQL log file with CSV format elsif ($fmt eq 'csv') { if ($queue_size > 1 || $job_per_file > 1) { &logmsg('WARNING', "parallel processing is disabled with csv format."); } require Text::CSV_XS; my $csv = Text::CSV_XS->new( { binary => 1, eol => $/, sep_char => $csv_sep_char, allow_loose_quotes => 1, } ); # Parse csvlog lines CSVLOOP: while (!$csv->eof()) { # CSV columns information: # ------------------------ # timestamp with milliseconds # username # database name # Process id # Remote host and port # session id # Line number # PS display # session start timestamp # Virtual transaction id # Transaction id # Error severity # SQL state code # errmessage # errdetail or errdetail_log # errhint # internal query # internal pos # errcontext # user query # file error location # application name # backend type # leader PID # query id while (my $row = $csv->getline($lfile)) { $row =~ s/\r//; # We received a signal last CSVLOOP if ($terminate); # Number of columns in csvlog (21 before 9.0, 22 before 13.0, 25 in 14.0) next if ( ($#{$row} < 21) && ($#{$row} > 24) ); # Set progress statistics $cursize += length(join(',', @$row)); $nlines++; &update_progress_bar($tmpoutfile, $nlines, $stop_offset, $totalsize, \$cursize, \$old_queries_count, \$old_errors_count, $fmt); next if ( ($row->[11] !~ $parse_regex) || ($row->[11] eq 'LOCATION')); # Extract the date if ($row->[0] =~ m/^(\d+)-(\d+)-(\d+)\s+(\d+):(\d+):(\d+)\.(\d+)/) { $prefix_vars{'t_year'} = $1; $prefix_vars{'t_month'} = $2; $prefix_vars{'t_day'} = $3; $prefix_vars{'t_hour'} = $4; $prefix_vars{'t_min'} = $5; $prefix_vars{'t_sec'} = $6; my $milli = $7 || 0; $prefix_vars{'t_time'} = "$prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; $prefix_vars{'t_timestamp'} = "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} $prefix_vars{'t_time'}"; # Remove newline characters from queries for (my $i = 0; $i <= $#$row; $i++) { $row->[$i] =~ s/[\r\n]+/ /gs; } # Skip unwanted lines my $res = &skip_unwanted_line(); next if ($res == 1); if ($res == -1) { &update_progress_bar($tmpoutfile, $nlines, $stop_offset, $totalsize, \$cursize, \$old_queries_count, \$old_errors_count, $fmt); $getout = 2; last CSVLOOP; } # Jump to the last line parsed if required next if (($incremental || $last_parsed) && !&check_incremental_position($fmt, $prefix_vars{'t_timestamp'}, join(',', @$row))); # Set query parameters as global variables $prefix_vars{'t_dbuser'} = $row->[1] || ''; $prefix_vars{'t_dbname'} = $row->[2] || ''; $prefix_vars{'t_appname'} = $row->[22] || ''; $prefix_vars{'t_client'} = $row->[4] || ''; $prefix_vars{'t_client'} =~ s/:.*//; $prefix_vars{'t_client'} = _gethostbyaddr($prefix_vars{'t_client'}) if ($dns_resolv); $prefix_vars{'t_host'} = 'csv'; # this unused variable is used to store format information when log format is not syslog $prefix_vars{'t_pid'} = $row->[3]; $prefix_vars{'t_session_line'} = $row->[5]; $prefix_vars{'t_session_line'} =~ s/\..*//; $prefix_vars{'t_loglevel'} = $row->[11]; $prefix_vars{'t_query'} = $row->[13]; # Set ERROR additional information $prefix_vars{'t_detail'} = $row->[14]; $prefix_vars{'t_hint'} = $row->[15]; $prefix_vars{'t_context'} = $row->[18]; $prefix_vars{'t_statement'} = $row->[19]; $prefix_vars{'t_queryid'} = $row->[24] if ($#{$row} >= 24); # Store the current timestamp of the log line &store_current_timestamp($prefix_vars{'t_timestamp'}, $prefix_vars{'t_pid'}, $prefix_vars{'t_dbname'}); # Update current timestamp with the timezone wanted if ($log_timezone) { ($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}) = change_timezone($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}); $prefix_vars{'t_time'} = "$prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; $prefix_vars{'t_timestamp'} = "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} $prefix_vars{'t_time'}"; } # Check if the log line should be excluded from the report if (&validate_log_line($prefix_vars{'t_pid'})) { # Parse the query now &parse_query($fmt); # The information can be saved immediately with csvlog if (exists $cur_info{$prefix_vars{'t_pid'}}) { &store_queries($prefix_vars{'t_pid'}); delete $cur_info{$prefix_vars{'t_pid'}}; } } } } if (!$csv->eof()) { warn "WARNING: cannot use CSV on $logfile, " . $csv->error_diag() . " at line " . ($nlines+1), "\n"; print STDERR "DETAIL: " . $csv->error_input(), "\n" if ($csv->error_input()); print STDERR "reset CSV parser\n"; $csv->SetDiag(0); } else { $cursize = $totalsize; } } } elsif ($fmt eq 'binary') { return $getout if (!load_stats($lfile)); $pipe->print("$totalsize 0 0 $fmt\n") if (defined $pipe); } # Format is not CSV and in incremental mode we are not at end of the file else { my $cur_pid = ''; my @matches = (); my $goon = ($incremental) ? 1 : 0; my $has_exclusion = 0; if ($#exclude_line >= 0) { $has_exclusion = 1; } &logmsg('DEBUG', "Start parsing postgresql log at offset $start_offset of file \"$logfile\" to " . ($stop_offset || $totalsize)); if (!$journalctl_cmd && !$log_command) { if ($start_offset) { # Move to the starting offset position in file $lfile->seek($start_offset, 0); } else { $lfile->seek(0, 0); } } while (my $line = <$lfile>) { # We received a signal last if ($terminate); # Get current size/offset in the log file $cursize += length($line) + (&get_eol_length() - 1); $current_offset += length($line) + (&get_eol_length() - 1); # Skip INFO line generated by other software next if ($line =~ /\bINFO: /); # Replace CR/LF by LF $line =~ s/\r//; # Start to exclude from parsing any desired lines if ($has_exclusion >= 0) { # Log line matches the excluded regex map { next if ($line =~ /$_/is); } @exclude_line; } chomp($line); $nlines++; next if (!$line); &update_progress_bar($tmpoutfile, $nlines, $stop_offset, $totalsize, \$cursize, \$old_queries_count, \$old_errors_count, $fmt); %prefix_vars = (); # Parse jsonlog lines if ($is_json_log) { %prefix_vars = parse_jsonlog_input($line); if (exists $prefix_vars{'t_textPayload'}) { @matches = ($prefix_vars{'t_textPayload'} =~ $compiled_prefix); my $q_match = 0; if ($#matches < 0 && $q_prefix) { @matches = ($prefix_vars{'t_textPayload'} =~ $q_prefix); $q_match = 1; } if ($#matches >= 0) { if (!$q_match) { for (my $i = 0 ; $i <= $#prefix_params ; $i++) { $prefix_vars{$prefix_params[$i]} = $matches[$i]; } } else { for (my $i = 0 ; $i <= $#prefix_q_params ; $i++) { $prefix_vars{$prefix_q_params[$i]} = $matches[$i]; } } } delete $prefix_vars{'t_textPayload'}; $prefix_vars{'t_pid'} = $prefix_vars{'t_session_id'} if ($use_sessionid_as_pid); # Skip location information next if ($prefix_vars{'t_loglevel'} eq 'LOCATION'); if (!$prefix_vars{'t_timestamp'} && $prefix_vars{'t_mtimestamp'}) { $prefix_vars{'t_timestamp'} = $prefix_vars{'t_mtimestamp'}; } elsif (!$prefix_vars{'t_timestamp'} && $prefix_vars{'t_session_timestamp'}) { $prefix_vars{'t_timestamp'} = $prefix_vars{'t_session_timestamp'}; } elsif (!$prefix_vars{'t_timestamp'} && $prefix_vars{'t_epoch'}) { $prefix_vars{'t_timestamp'} = strftime("%Y-%m-%d %H:%M:%S", CORE::localtime($prefix_vars{'t_epoch'})); if ($prefix_vars{'t_epoch'} =~ /^\d{10}(\.\d{3})$/) { $prefix_vars{'t_timestamp'} .= $1; } } elsif ($prefix_vars{'t_timestamp'} =~ /^\d{10}(\.\d{3})$/) { my $ms = $1; $prefix_vars{'t_epoch'} = $prefix_vars{'t_timestamp'}; $prefix_vars{'t_timestamp'} = strftime("%Y-%m-%d %H:%M:%S", CORE::localtime($prefix_vars{'t_timestamp'})); $prefix_vars{'t_timestamp'} .= $ms; } if ($prefix_vars{'t_timestamp'}) { ($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}) = ($prefix_vars{'t_timestamp'} =~ $time_pattern); } elsif ($prefix_vars{'t_year'}) { $prefix_vars{'t_timestamp'} = "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} $prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; } $prefix_vars{'t_time'} = "$prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; # Skip this line if there is no timestamp next if (!$prefix_vars{'t_timestamp'} || $prefix_vars{'t_timestamp'} eq '-- ::'); if ($prefix_vars{'t_hostport'} && !$prefix_vars{'t_client'}) { $prefix_vars{'t_client'} = $prefix_vars{'t_hostport'}; # Remove the port part $prefix_vars{'t_client'} =~ s/\(.*//; } } $cur_pid = $prefix_vars{'t_pid'} if ($prefix_vars{'t_pid'}); $prefix_vars{'t_query'} =~ s/^\\t//; # Collect orphaned lines of multiline queries if (!$prefix_vars{'t_loglevel'} && $cur_pid) { # Some log line may be written by applications next if ($line =~ /\bLOG: /); # Parse orphan lines to append information to the right place &parse_orphan_line($cur_pid, $prefix_vars{'t_query'}); } elsif ($cur_pid) { # Skip unwanted lines my $res = &skip_unwanted_line(); next if ($res == 1); # Jump to the last line parsed if required next if (($incremental || $last_parsed) && !&check_incremental_position($fmt, $prefix_vars{'t_timestamp'}, $line)); # We have reach previous incremental position (or we not in increment mode) $goon = 1; # Store the current timestamp of the log line &store_current_timestamp($prefix_vars{'t_timestamp'}, $prefix_vars{'t_pid'}, $prefix_vars{'t_dbname'}); # Update current timestamp with the timezone wanted if ($log_timezone) { ($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}) = change_timezone($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}); $prefix_vars{'t_time'} = "$prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; $prefix_vars{'t_timestamp'} = "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} $prefix_vars{'t_time'}"; } # Check if the log line should be excluded from the report if (&validate_log_line($prefix_vars{'t_pid'})) { # The information can be saved when we are switching to a new main message if ($cur_pid && exists $cur_info{$cur_pid} && ($prefix_vars{'t_loglevel'} =~ /^(LOG|ERROR|FATAL|PANIC|WARNING)$/)) { &store_queries($cur_pid); delete $cur_info{$cur_pid}; } # Parse the query now &parse_query($fmt); } } elsif ($fmt ne 'jsonlog' && $line !~ /textpayload/i) { &logmsg('DEBUG', "Unknown $fmt line format: $line"); } } # Parse syslog lines elsif ($is_syslog) { @matches = ($line =~ $compiled_prefix); my $q_match = 0; if ($#matches < 0 && $q_prefix) { @matches = ($line =~ $q_prefix); $q_match = 1; } if ($#matches >= 0) { if (!$q_match) { for (my $i = 0 ; $i <= $#prefix_params ; $i++) { $prefix_vars{$prefix_params[$i]} = $matches[$i]; } } else { for (my $i = 0 ; $i <= $#prefix_q_params ; $i++) { $prefix_vars{$prefix_q_params[$i]} = $matches[$i]; } } # skip non postgresql lines next if (exists $prefix_vars{'t_ident'} && $prefix_vars{'t_ident'} ne $ident); # Skip location information next if ($prefix_vars{'t_loglevel'} eq 'LOCATION'); # Standard syslog format does not have year information, months are # three letters and days are not always with 2 digits. if ($prefix_vars{'t_month'} !~ /\d/) { $prefix_vars{'t_year'} = $gyear; $prefix_vars{'t_day'} = sprintf("%02d", $prefix_vars{'t_day'}); $prefix_vars{'t_month'} = $month_abbr{$prefix_vars{'t_month'}}; # Take care of year overlapping if ("$prefix_vars{'t_year'}$prefix_vars{'t_month'}$prefix_vars{'t_day'}" > $CURRENT_DATE) { $prefix_vars{'t_year'} = substr($CURRENT_DATE, 0, 4) - 1; } } $prefix_vars{'t_time'} = "$prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; $prefix_vars{'t_timestamp'} = "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} $prefix_vars{'t_time'}"; if ($prefix_vars{'t_hostport'} && !$prefix_vars{'t_client'}) { $prefix_vars{'t_client'} = $prefix_vars{'t_hostport'}; # Remove the port part $prefix_vars{'t_client'} =~ s/\(.*//; } # Skip unwanted lines my $res = &skip_unwanted_line(); next if ($res == 1); if ($res == -1) { &update_progress_bar($tmpoutfile, $nlines, $stop_offset, $totalsize, \$cursize, \$old_queries_count, \$old_errors_count, $fmt); $getout = 2; last; } # Jump to the last line parsed if required next if (($incremental || $last_parsed) && !&check_incremental_position($fmt, $prefix_vars{'t_timestamp'}, $line)); # We have reach previous incremental position (or we not in increment mode) $goon = 1; $prefix_vars{'t_client'} = _gethostbyaddr($prefix_vars{'t_client'}) if ($dns_resolv); # Store the current timestamp of the log line &store_current_timestamp($prefix_vars{'t_timestamp'}, $prefix_vars{'t_pid'}, $prefix_vars{'t_dbname'}); # Update current timestamp with the timezone wanted if ($log_timezone) { ($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}) = change_timezone($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}); $prefix_vars{'t_time'} = "$prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; $prefix_vars{'t_timestamp'} = "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} $prefix_vars{'t_time'}"; } # Extract information from log line prefix if (!$log_line_prefix) { &parse_log_prefix($prefix_vars{'t_logprefix'}); } # Check if the log line should be excluded from the report if (&validate_log_line($prefix_vars{'t_pid'})) { # The information can be saved when we are switching to a new main message if ($cur_pid && exists $cur_info{$cur_pid} && ($prefix_vars{'t_loglevel'} =~ /^(LOG|ERROR|FATAL|PANIC|WARNING)$/)) { &store_queries($cur_pid); delete $cur_info{$cur_pid} if (!$log_duration || ($cur_info{$cur_pid}{duration} ne '' && $cur_info{$cur_pid}{query} ne '')); } # Process the log line &parse_query($fmt); $cur_pid = $prefix_vars{'t_pid'}; } } elsif ($goon && ($line =~ $other_syslog_line)) { $cur_pid = $8; my $t_query = $10; if ($fmt eq 'logplex' && not exists $cur_info{$cur_pid}{cur_db}) { $cur_info{$cur_pid}{cur_db} = $9; } $t_query =~ s/#011/\t/g; next if ($t_query eq "\t"); # Some log line may be written by applications next if ($t_query =~ /\bLOG: /); # Parse orphan lines to append information to the right place &parse_orphan_line($cur_pid, $t_query); } # Collect orphaned lines of multiline queries elsif ($cur_pid) { # Some log line may be written by applications next if ($line =~ /\bLOG: /); # Parse orphan lines to append information to the right place &parse_orphan_line($cur_pid, $line); } else { &logmsg('DEBUG', "Unknown $fmt line format: $line"); } } elsif ($fmt eq 'stderr' or $fmt eq 'rds' or $fmt eq 'redshift') { @matches = ($line =~ $compiled_prefix); my $q_match = 0; if ($#matches < 0 && $q_prefix) { @matches = ($line =~ $q_prefix); $q_match = 1; } if ($#matches >= 0) { if (!$q_match) { for (my $i = 0 ; $i <= $#prefix_params ; $i++) { $prefix_vars{$prefix_params[$i]} = $matches[$i]; } } else { for (my $i = 0 ; $i <= $#prefix_q_params ; $i++) { $prefix_vars{$prefix_q_params[$i]} = $matches[$i]; } } $prefix_vars{'t_client'} =~ s/\(.*// if ($fmt eq 'rds'); $prefix_vars{'t_pid'} = $prefix_vars{'t_session_id'} if ($use_sessionid_as_pid); # Skip location information next if ($prefix_vars{'t_loglevel'} eq 'LOCATION'); if (!$prefix_vars{'t_timestamp'} && $prefix_vars{'t_mtimestamp'}) { $prefix_vars{'t_timestamp'} = $prefix_vars{'t_mtimestamp'}; } elsif (!$prefix_vars{'t_timestamp'} && $prefix_vars{'t_session_timestamp'}) { $prefix_vars{'t_timestamp'} = $prefix_vars{'t_session_timestamp'}; } elsif (!$prefix_vars{'t_timestamp'} && $prefix_vars{'t_epoch'}) { $prefix_vars{'t_timestamp'} = strftime("%Y-%m-%d %H:%M:%S", CORE::localtime($prefix_vars{'t_epoch'})); if ($prefix_vars{'t_epoch'} =~ /^\d{10}(\.\d{3})$/) { $prefix_vars{'t_timestamp'} .= $1; } } elsif ($prefix_vars{'t_timestamp'} =~ /^\d{10}(\.\d{3})$/) { my $ms = $1; $prefix_vars{'t_epoch'} = $prefix_vars{'t_timestamp'}; $prefix_vars{'t_timestamp'} = strftime("%Y-%m-%d %H:%M:%S", CORE::localtime($prefix_vars{'t_timestamp'})); $prefix_vars{'t_timestamp'} .= $ms; } if ($prefix_vars{'t_timestamp'}) { ($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}) = ($prefix_vars{'t_timestamp'} =~ $time_pattern); } elsif ($prefix_vars{'t_year'}) { $prefix_vars{'t_timestamp'} = "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} $prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; } $prefix_vars{'t_time'} = "$prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; # Skip this line if there is no timestamp next if (!$prefix_vars{'t_timestamp'} || $prefix_vars{'t_timestamp'} eq '-- ::'); if ($prefix_vars{'t_hostport'} && !$prefix_vars{'t_client'}) { $prefix_vars{'t_client'} = $prefix_vars{'t_hostport'}; # Remove the port part $prefix_vars{'t_client'} =~ s/\(.*//; } $force_sample = 1 if ($fmt eq 'redshift' && $prefix_vars{'t_loglevel'} eq 'LOG' && $prefix_vars{'t_client'} !~ /duration: /); # Skip unwanted lines my $res = &skip_unwanted_line(); next if ($res == 1); if ($res == -1) { &update_progress_bar($tmpoutfile, $nlines, $stop_offset, $totalsize, \$cursize, \$old_queries_count, \$old_errors_count, $fmt); $getout = 2; last; } # Jump to the last line parsed if required next if (($incremental || $last_parsed) && !&check_incremental_position($fmt, $prefix_vars{'t_timestamp'}, $line)); # We have reach previous incremental position (or we not in increment mode) $goon = 1; $prefix_vars{'t_client'} = _gethostbyaddr($prefix_vars{'t_client'}) if ($dns_resolv); # Store the current timestamp of the log line &store_current_timestamp($prefix_vars{'t_timestamp'}, $prefix_vars{'t_pid'}, $prefix_vars{'t_dbname'}); # Update current timestamp with the timezone wanted if ($log_timezone) { ($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}) = change_timezone($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}); $prefix_vars{'t_time'} = "$prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; $prefix_vars{'t_timestamp'} = "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} $prefix_vars{'t_time'}"; } # Extract information from log line prefix if (!$log_line_prefix) { &parse_log_prefix($prefix_vars{'t_logprefix'}); } # Check if the log line should be excluded from the report if (&validate_log_line($prefix_vars{'t_pid'})) { # this unused variable is used to store format information # when log format is not syslog $prefix_vars{'t_host'} = 'stderr'; # The information from previous loop can be saved # when we are switching to a new main message if ($cur_pid && exists $cur_info{$cur_pid} && $prefix_vars{'t_loglevel'} =~ /^(LOG|ERROR|FATAL|PANIC|WARNING)$/) { &store_queries($cur_pid); delete $cur_info{$cur_pid} if (!$log_duration || ($cur_info{$cur_pid}{duration} ne '' && $cur_info{$cur_pid}{query} ne '')); } # Process the log line &parse_query($fmt); $cur_pid = $prefix_vars{'t_pid'}; } } # Collect additional query information elsif ($goon && $cur_pid) { # Some log line may be written by applications next if ($line =~ /\bLOG: /); # Parse orphan lines to append information to the right place &parse_orphan_line($cur_pid, $line); } elsif ($goon) { # unknown format &logmsg('DEBUG', "Unknown $fmt log line format: $line"); } } last if (($stop_offset > 0) && ($current_offset >= $stop_offset)); } if ($goon && $last_parsed) { &logmsg('DEBUG', "setting current position in log to $current_offset"); $last_line{current_pos} = $current_offset; } } close $lfile; # Inform the parent that it should stop parsing other files if ($terminate) { if ($^O !~ /MSWin32|dos/i) { kill('USR2', $parent_pid); } else { kill('TERM', $parent_pid); } return $terminate; } # Get stats from all pending temporary storage foreach my $pid (sort {$cur_info{$a}{date} <=> $cur_info{$b}{date}} keys %cur_info) { # Stores last query information &store_queries($pid, 1); } # Stores last temporary files and lock information foreach my $pid (keys %cur_temp_info) { &store_temporary_and_lock_infos($pid); } # Stores last cancelled queries information foreach my $pid (keys %cur_cancel_info) { &store_temporary_and_lock_infos($pid); } # Stores last temporary files and lock information foreach my $pid (keys %cur_lock_info) { &store_temporary_and_lock_infos($pid); } if ($extens eq 'tsung') { foreach my $pid (sort {$a <=> $b} keys %tsung_session) { &store_tsung_session($pid); } } if ($progress && ($getout != 1)) { # Bzip2 and remote download compressed files has an # estimated size. Force 100% at end of log parsing if (($http_download && $logfile =~ $compress_extensions ) || $logfile =~ /\.bz2$/i) { $cursize = $totalsize; } if (!$tmpoutfile || not defined $pipe) { print STDERR &progress_bar($cursize, $stop_offset || $totalsize, 25, '=', ($overall_stat{'queries_number'} + $pgb_overall_stat{'queries_number'}), ($overall_stat{'errors_number'} + $pgb_overall_stat{'errors_number'}), $fmt ); print STDERR "\n"; } else { $pipe->print("$cursize " . ($overall_stat{'queries_number'} + $pgb_overall_stat{'queries_number'} - $old_queries_count) . " " . ($overall_stat{'errors_number'} + $pgb_overall_stat{'errors_number'} - $old_errors_count) . " $fmt\n"); } } # Case where we build reports from binary only with no new log entries. if ($incremental && $html_outdir && !$outdir) { if ($logfile =~ /\/(\d+)\/(\d+)\/(\d+)\/[^\/]+\.bin$/) { $last_line{datetime} = "$1-$2-$3"; } } %cur_info = (); # In incremental mode data are saved to disk per day if ($incremental && ($last_line{datetime} || (($fmt =~ /pgbouncer/) && $pgb_last_line{datetime}))) { $incr_date = ($fmt =~ /pgbouncer/) ? $pgb_last_line{datetime} : $last_line{datetime}; $incr_date =~ s/\s.*$//; # set path and create subdirectories if ($incr_date =~ /^(\d+)-(\d+)-(\d+)/) { mkdir("$outdir/$1") if (!-d "$outdir/$1"); mkdir("$outdir/$1/$2") if (!-d "$outdir/$1/$2"); mkdir("$outdir/$1/$2/$3") if (!-d "$outdir/$1/$2/$3"); } else { &logmsg('ERROR', "invalid incremental date: $incr_date, can not create subdirectories."); } my $bpath = $incr_date; $bpath =~ s/\-/\//g; # Mark the directory as needing index update if (open(my $out, '>>', "$last_parsed.tmp")) { flock($out, 2) || return $getout; print $out "$incr_date\n"; close($out); } else { &logmsg('ERROR', "can't save last parsed line into $last_parsed.tmp, $!"); } # Save binary data if ($outdir) { my $filenum = $$; $filenum++ while (-e "$outdir/$bpath/$incr_date-$filenum.bin"); my $fhb = new IO::File ">$outdir/$bpath/$incr_date-$filenum.bin"; if (not defined $fhb) { localdie("FATAL: can't write to $outdir/$bpath/$incr_date-$filenum.bin, $!\n"); } &dump_as_binary($fhb); $fhb->close; } } elsif (fileno($tmpoutfile)) { &dump_as_binary($tmpoutfile); $tmpoutfile->close(); } # Save last line into temporary file if ($last_parsed && (scalar keys %last_line || scalar keys %pgb_last_line)) { if (open(my $out, '>>', "$tmp_last_parsed")) { flock($out, 2) || return $getout; if ($fmt =~ /pgbouncer/) { $pgb_last_line{current_pos} ||= 0; &logmsg('DEBUG', "Saving pgbouncer last parsed line into $tmp_last_parsed ($pgb_last_line{datetime}\t$pgb_last_line{current_pos})"); print $out "pgbouncer\t$pgb_last_line{datetime}\t$pgb_last_line{current_pos}\t$pgb_last_line{orig}\n"; } else { $last_line{current_pos} ||= 0; &logmsg('DEBUG', "Saving last parsed line into $tmp_last_parsed ($last_line{datetime}\t$last_line{current_pos})"); print $out "$last_line{datetime}\t$last_line{current_pos}\t$last_line{orig}\n"; } close($out); } else { &logmsg('ERROR', "can't save last parsed line into $tmp_last_parsed, $!"); } } # Inform the parent that it should stop parsing other files if ($getout) { if ($^O !~ /MSWin32|dos/i) { kill('USR2', $parent_pid); } else { kill('TERM', $parent_pid); } } # Write the list of database we have proceeded in this process if ($report_per_database) { if (open(my $out, '>>', "$tmp_dblist")) { flock($out, 2) || return $getout; print $out join(';', %{ $overall_stat{nlines} }), "\n"; close($out); } else { &logmsg('ERROR', "can't save last parsed line into $tmp_dblist, $!"); } } &init_stats_vars() if ($tmpoutfile); return $getout; } sub unescape_jsonlog { my $str = shift; while ($str =~ s/([^\\])\\"/$1"/g) {}; while ($str =~ s/([^\\])\\t/$1\t/g) {}; while ($str =~ s/\\r\\n/\n/gs) {}; while ($str =~ s/([^\\])\\r/$1\n/gs) {}; while ($str =~ s/([^\\])\\n/$1\n/gs) {}; return $str; } sub parse_jsonlog_input { my $str = shift; my %infos = (); # json columns information from jsonlog extension: # ------------------------------------------------- # timestamp with milliseconds # username # database name # Process id # Remote host and port # session id # Line number* # PS display * # session start timestamp * # Virtual transaction id # Transaction id # Error severity # SQL state code # errdetail or errdetail_log # errhint # internal query # internal pos * # errcontext # user query # file error location # application name # errmessage # backend type * # leader PID * # query id * # # (*) information not available for the moment # Extract the date if ($str =~ m/[\{,]"timestamp":\s*"(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)\.(\d+)/) { $infos{'t_year'} = $1; $infos{'t_month'} = $2; $infos{'t_day'} = $3; $infos{'t_hour'} = $4; $infos{'t_min'} = $5; $infos{'t_sec'} = $6; my $milli = $7 || 0; $infos{'t_timestamp'} = "$infos{'t_year'}-$infos{'t_month'}-$infos{'t_day'} $infos{'t_hour'}:$infos{'t_min'}:$infos{'t_sec'}"; $infos{'t_time'} = "$infos{'t_hour'}:$infos{'t_min'}:$infos{'t_sec'}"; } # Set query parameters as global variables if ($str =~ m/"user":\s*"(.*?)"(?:,"|\})/) { $infos{'t_dbuser'} = $1; } elsif ($str =~ m/,user=([\s]*) /) { $infos{'t_dbuser'} = $1; } if ($str =~ m/"dbname":\s*"(.*?)"(?:,"|\})/) { $infos{'t_dbname'} = $1; } elsif ($str =~ m/ db=([^,]*),/) { $infos{'t_dbname'} = $1; } if ($str =~ m/"application_name":\s*"(.*?)"(?:,"|\})/) { $infos{'t_appname'} = $1; } elsif ($str =~ m/"project_id":\s*"([^"])"/) { $infos{'t_appname'} = $1; } if ($str =~ m/"remote_host":\s*"(.*?)"(?:,"|\})/) { $infos{'t_client'} = $1; $infos{'t_client'} =~ s/:.*//; $infos{'t_client'} = _gethostbyaddr($infos{'t_client'}) if ($dns_resolv); } $infos{'t_host'} = 'jsonlog'; # this unused variable is used to store format information when log format is not syslog # Try to extract the pid information if ($str =~ m/"pid":\s*"(.*?)"(?:,"|\})/) { $infos{'t_pid'} = $1; } elsif ($str =~ m/"pid":\s*([0-9]+)/) { $infos{'t_pid'} = $1; } elsif ($str =~ m/"textPayload":\s*"\[(\d+)\]:/) { $infos{'t_pid'} = $1; } elsif ($str =~ m/"textPayload":\s*"(\d+-\d+-\d+[T\s]\d+:\d+:\d+\.\d+\s[^\s]*\s+\[(\d+)\]:.*)",(?:"timestamp":.*)?$/) { $infos{'t_pid'} = $2; $infos{'t_textPayload'} = $1; } elsif ($str =~ m/"textPayload":\s*"(.*)","timestamp":"/) { $infos{'t_query'} = unescape_jsonlog($1); } if ($str =~ m/"error_severity":\s*"(.*?)"(?:,"|\})/) { $infos{'t_loglevel'} = $1; } elsif ($str =~ m/user=[^\s]* ([^:]+):\s{1,2}(.*)","timestamp":"/) { $infos{'t_loglevel'} = $1; $infos{'t_query'} = unescape_jsonlog($2); } if ($str =~ m/"state_code":\s*"(.*?)"(?:,"|\})/) { $infos{'t_sqlstate'} = $1; } if ($str =~ m/"message":\s*"(.*?)"(?:,"|\})/) { $infos{'t_query'} = unescape_jsonlog($1); } elsif ($str =~ m/"statement":\s*"(.*?)"(?:,"|\})/) { $infos{'t_query'} = unescape_jsonlog($1); } # Set ERROR additional information if ($str =~ m/"(?:detail_log|detail)":\s*"(.*?)"(?:,"|\})/) { $infos{'t_detail'} = unescape_jsonlog($1); } if ($str =~ m/"hint":\s*"(.*?)"(?:,"|\})/) { $infos{'t_hint'} = unescape_jsonlog($1); } if ($str =~ m/"context":\s*"(.*?)"(?:,"|\})/) { $infos{'t_context'} = unescape_jsonlog($1); } if ($str =~ m/"(?:statement|internal_query)":\s*"(.*?)"(?:,"|\})/) { $infos{'t_statement'} = unescape_jsonlog($1); } # Backend type information if ($str =~ m/"backend_type":\s*"(.*?)"(?:,"|\})/) { $infos{'t_backend_type'} = $1; } return %infos; } sub parse_orphan_line { my ($cur_pid, $line, $t_dbname) = @_; my $curdb = undef; if (!exists $cur_info{$cur_pid} || !exists $cur_info{$cur_pid}{cur_db} || !$cur_info{$cur_pid}{cur_db}) { $curdb = set_current_db($t_dbname); } else { $curdb = $cur_info{$cur_pid}{cur_db}; } if (!$report_per_database) { $curdb = $DBALL; } # Store vacuum related information if ($cur_info{$cur_pid}{vacuum} && ($line =~ /^\t*(pages|tuples|buffer usage|avg read rate|system usage|WAL usage):/)) { if ($line =~ /(pages|tuples): (\d+) removed, (\d+) remain/) { $autovacuum_info{$curdb}{tables}{$cur_info{$cur_pid}{vacuum}}{$1}{removed} += $2; $autovacuum_info{$curdb}{tables}{$cur_info{$cur_pid}{vacuum}}{$1}{remain} += $3; } if ($line =~ /(\d+) are dead but not yet removable/) { $autovacuum_info{$curdb}{tables}{$cur_info{$cur_pid}{vacuum}}{tuples}{notremovable} += $1; } if ($line =~ m#^\t?system usage: CPU .* (?:sec|s,) elapsed (.*) s#) { if ($1 > $autovacuum_info{$curdb}{peak}{system_usage}{elapsed}) { $autovacuum_info{$curdb}{peak}{system_usage}{elapsed} = $1; $autovacuum_info{$curdb}{peak}{system_usage}{table} = $cur_info{$cur_pid}{vacuum}; $autovacuum_info{$curdb}{peak}{system_usage}{date} = "$cur_info{$cur_pid}{year}-$cur_info{$cur_pid}{month}-$cur_info{$cur_pid}{day} " . "$cur_info{$cur_pid}{hour}:$cur_info{$cur_pid}{min}:$cur_info{$cur_pid}{sec}"; } } if ($line =~ /, (\d+) skipped due to pins, (\d+) skipped frozen/) { $autovacuum_info{$curdb}{tables}{$cur_info{$cur_pid}{vacuum}}{skip_pins} += $1; $autovacuum_info{$curdb}{tables}{$cur_info{$cur_pid}{vacuum}}{skip_frozen} += $2; } if ($line =~ /buffer usage: (\d+) hits, (\d+) misses, (\d+) dirtied/) { $autovacuum_info{$curdb}{tables}{$cur_info{$cur_pid}{vacuum}}{hits} += $1; $autovacuum_info{$curdb}{tables}{$cur_info{$cur_pid}{vacuum}}{misses} += $2; $autovacuum_info{$curdb}{tables}{$cur_info{$cur_pid}{vacuum}}{dirtied} += $3; } if ($line =~ /WAL usage: (\d+) records, (\d+) full page images, (\d+) bytes/) { $autovacuum_info{$curdb}{tables}{$cur_info{$cur_pid}{vacuum}}{wal_record} += $1; $autovacuum_info{$curdb}{tables}{$cur_info{$cur_pid}{vacuum}}{wal_full_page} += $2; $autovacuum_info{$curdb}{tables}{$cur_info{$cur_pid}{vacuum}}{wal_bytes} += $3; } } # stores bind parameters if parameter syntax is detected elsif ( $cur_info{$cur_pid}{parameters}) { if (!$error_only) { $cur_info{$cur_pid}{parameters} .= ' ' if ($cur_info{$cur_pid}{parameters} =~ /=$/); $cur_info{$cur_pid}{parameters} .= "$line"; } } # stores explain plan lines elsif (exists $cur_plan_info{$cur_pid}{plan}) { $cur_plan_info{$cur_pid}{plan} .= "\n" . $line; } # If we have previously stored a temporary file query, append to that query elsif (exists $cur_temp_info{$cur_pid}{size}) { $cur_temp_info{$cur_pid}{query} .= "\n" . $line; } # If we have previously stored a query that generates locks, append to that query elsif (exists $cur_lock_info{$cur_pid}{query}) { $cur_lock_info{$cur_pid}{query} .= "\n" . $line; } # If we have previously stored a cancelled query, append to that query elsif (exists $cur_cancel_info{$cur_pid}{query}) { $cur_cancel_info{$cur_pid}{query} .= "\n" . $line; } # Otherwise append the orphan line to the corresponding part of the query else { # Append to the error statement if one is defined if (exists $cur_info{$cur_pid}{statement}) { $cur_info{$cur_pid}{statement} .= "\n" . $line if (!$nomultiline); # Append to the bind parameters if one is defined } elsif (exists $cur_info{$cur_pid}{parameters}) { $cur_info{$cur_pid}{parameters} .= $line if (!$error_only); # Append to the error context if one is defined } elsif (exists $cur_info{$cur_pid}{context}) { $cur_info{$cur_pid}{context} .= "\n" . $line; # Append to the query detail if one is defined } elsif (exists $cur_info{$cur_pid}{detail}) { $cur_info{$cur_pid}{detail} .= "\n" . $line; # After all append to the query if one is defined } elsif (exists $cur_info{$cur_pid}{query}) { $cur_info{$cur_pid}{query} .= "\n" . $line if (!$nomultiline && !$error_only); # Associate to bind|prepare query now that& we collect it too } elsif (exists $cur_bind_info{$cur_pid}{'bind'}) { $cur_bind_info{$cur_pid}{'bind'}{'query'} .= "\n" . $line; } elsif (exists $cur_bind_info{$cur_pid}{'prepare'}) { $cur_bind_info{$cur_pid}{'prepare'}{'query'} .= "\n" . $line; } } } # Store the current timestamp of the log line sub store_current_timestamp { my ($t_timestamp, $t_pid, $t_dbname) = @_; # Store current report name and list of database my $curdb = undef; if (!exists $cur_info{$t_pid} || !exists $cur_info{$t_pid}{cur_db}) { $curdb = set_current_db($t_dbname); } else { $curdb = $cur_info{$t_pid}{cur_db}; } if ($is_tsung_output) { $prefix_vars{'t_date'} = $t_timestamp; $prefix_vars{'t_date'} =~ s/\D+//g; } if (!$overall_stat{$curdb}{'first_log_ts'} || ($overall_stat{$curdb}{'first_log_ts'} gt $t_timestamp)) { $overall_stat{$curdb}{'first_log_ts'} = $t_timestamp; } if (!$overall_stat{$curdb}{'last_log_ts'} || ($overall_stat{$curdb}{'last_log_ts'} lt $t_timestamp)) { $overall_stat{$curdb}{'last_log_ts'} = $t_timestamp; } } sub detect_new_log_line { my ($lfile, $fmt, $current_date, $gyear, $saved_date, $startoffset) = @_; my $more_lines = 0; while (my $line = <$lfile>) { $line =~ s/\r//; chomp($line); next if (!$line); if ($fmt =~ /syslog|logplex/) { my @matches = ($line =~ $compiled_prefix); if ($#matches >= 0) { for (my $i = 0 ; $i <= $#prefix_params ; $i++) { $prefix_vars{$prefix_params[$i]} = $matches[$i]; } # Standard syslog format does not have year information, months are # three letters and days are not always with 2 digits. if ($prefix_vars{'t_month'} !~ /\d/) { $prefix_vars{'t_year'} = $gyear; $prefix_vars{'t_day'} = sprintf("%02d", $prefix_vars{'t_day'}); $prefix_vars{'t_month'} = $month_abbr{$prefix_vars{'t_month'}}; # Take care of year overlapping if ("$prefix_vars{'t_year'}$prefix_vars{'t_month'}$prefix_vars{'t_day'}" > $current_date) { $prefix_vars{'t_year'} = substr($current_date, 0, 4) - 1; } } $prefix_vars{'t_time'} = "$prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; $prefix_vars{'t_timestamp'} = "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} $prefix_vars{'t_time'}"; } } elsif ($fmt eq 'jsonlog') { %prefix_vars = parse_jsonlog_input($line); if (!exists $prefix_vars{'t_year'}) { ($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}) = ($prefix_vars{'t_timestamp'} =~ $time_pattern); } $prefix_vars{'t_time'} = "$prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; } elsif ($fmt =~ /pgbouncer/) { my @matches = ($line =~ $pgbouncer_log_parse1); if ($#matches >= 0) { for (my $i = 0 ; $i <= $#pgb_prefix_parse1 ; $i++) { $prefix_vars{$pgb_prefix_parse1[$i]} = $matches[$i]; } } } else { my @matches = ($line =~ $compiled_prefix); if ($#matches >= 0) { for (my $i = 0 ; $i <= $#prefix_params ; $i++) { $prefix_vars{$prefix_params[$i]} = $matches[$i]; } $prefix_vars{'t_client'} =~ s/\(.*// if ($fmt eq 'rds'); if (!$prefix_vars{'t_timestamp'} && $prefix_vars{'t_mtimestamp'}) { $prefix_vars{'t_timestamp'} = $prefix_vars{'t_mtimestamp'}; } elsif (!$prefix_vars{'t_timestamp'} && $prefix_vars{'t_session_timestamp'}) { $prefix_vars{'t_timestamp'} = $prefix_vars{'t_session_timestamp'}; } elsif (!$prefix_vars{'t_timestamp'} && $prefix_vars{'t_epoch'}) { $prefix_vars{'t_timestamp'} = strftime("%Y-%m-%d %H:%M:%S", CORE::localtime($prefix_vars{'t_epoch'})); if ($prefix_vars{'t_epoch'} =~ /^\d{10}(\.\d{3})$/) { $prefix_vars{'t_timestamp'} .= $1; } } elsif ($prefix_vars{'t_timestamp'} =~ /^\d{10}(\.\d{3})$/) { my $ms = $1; $prefix_vars{'t_epoch'} = $prefix_vars{'t_timestamp'}; $prefix_vars{'t_timestamp'} = strftime("%Y-%m-%d %H:%M:%S", CORE::localtime($prefix_vars{'t_timestamp'})); $prefix_vars{'t_timestamp'} .= $ms; } } if ($prefix_vars{'t_timestamp'}) { ($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}) = ($prefix_vars{'t_timestamp'} =~ $time_pattern); } elsif ($prefix_vars{'t_year'}) { $prefix_vars{'t_timestamp'} = "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} $prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; } $prefix_vars{'t_time'} = "$prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; # Skip this line if there is no timestamp next if (!$prefix_vars{'t_timestamp'} || $prefix_vars{'t_timestamp'} eq '-- ::'); } # Unwanted line next if (!$prefix_vars{'t_timestamp'}); # This file has already been parsed if ($saved_date gt $prefix_vars{'t_timestamp'}) { close($lfile); return (0, "timestamp $prefix_vars{'t_timestamp'} read at offset $startoffset is lower than saved timestamp: $saved_date"); } elsif ($saved_date eq $prefix_vars{'t_timestamp'}) { next; } else { $more_lines++; last; } } return ($more_lines, "more line after the saved position"); } # Method used to check if the file stores logs after the last incremental position or not # This position should have been saved in the incremental file and read in the $last_parsed at # start up. Here we just verify that the first date in file is before the last incremental date. sub check_file_changed { my ($file, $totalsize, $fmt, $saved_date, $saved_pos, $look_at_beginning) = @_; # Incremental mode is not possible for the moment with http download return 1 if ($file =~ /^(http[s]*:|[s]*ftp:)/); my $lfile = &get_log_file($file, $totalsize); return if (!defined $lfile); # Compressed files do not allow seeking my $iscompressed = ($file =~ $compress_extensions) ? 1 : 0; if ($iscompressed) { close($lfile); return (1, "log file is compressed"); } my ($gsec, $gmin, $ghour, $gmday, $gmon, $gyear, $gwday, $gyday, $gisdst) = localtime(time); $gyear += 1900; my $current_date = $gyear . sprintf("%02d", $gmon + 1) . sprintf("%02d", $gmday); %prefix_vars = (); my $startoffset = 0; # If seeking is not explicitely disabled if (!$look_at_beginning) { # do not seek if filesize is smaller than the seek position if ($saved_pos < $totalsize) { $lfile->seek($saved_pos || 0, 0); $startoffset = $saved_pos || 0; } # Case of file with same size elsif ($saved_pos == $totalsize) { # A log line can not be greater than 8192 so rewind to # previous line, we don't care if we rewind more line # before as if this is the same log they are in the past if ($saved_pos > 8192) { $lfile->seek($saved_pos - 8192, 0); $startoffset = $saved_pos - 8192; } else { $lfile->seek(0, 0); $startoffset = 0; } } } my ($more_lines, $msg) = detect_new_log_line($lfile, $fmt, $current_date, $gyear, $saved_date, $startoffset); if (!$more_lines) { $lfile->seek(0, 0); $startoffset = 0; ($more_lines, $msg) = detect_new_log_line($lfile, $fmt, $current_date, $gyear, $saved_date, $startoffset); if (!$more_lines) { close($lfile); return (0, "there no new lines in this file"); } } close($lfile); return (1, "reach the end of check_file_changed() with start date: $saved_date and file size: $totalsize") ; } # Method used to check if we have already reached the last parsing position in incremental mode # This position should have been saved in the incremental file and read in the $last_parsed at # start up. sub check_incremental_position { my ($fmt, $cur_date, $line) = @_; if ($last_parsed && ($fmt !~ /pgbouncer/)) { if ($saved_last_line{datetime}) { if ($cur_date lt $saved_last_line{datetime}) { return 0; } elsif (!$last_line{datetime} && ($cur_date eq $saved_last_line{datetime})) { return 0 if ($line ne $saved_last_line{orig}); } } $last_line{datetime} = $cur_date; $last_line{orig} = $line; } elsif ($last_parsed) { if ($pgb_saved_last_line{datetime}) { if ($cur_date lt $pgb_saved_last_line{datetime}) { return 0; } elsif (!$pgb_last_line{datetime} && ($cur_date eq $pgb_saved_last_line{datetime})) { return 0 if ($line ne $pgb_saved_last_line{orig}); } } $pgb_last_line{datetime} = $cur_date; $pgb_last_line{orig} = $line; } # In incremental mode data are saved to disk per day if ($incremental) { $cur_date =~ s/\s.*$//; # Check if the current day has changed, if so save data $incr_date = $cur_date if (!$incr_date); if ($cur_date gt $incr_date) { # Get stats from all pending temporary storage foreach my $pid (sort {$cur_info{$a}{date} <=> $cur_info{$b}{date}} keys %cur_info) { # Stores last queries information &store_queries($pid, 1); } # Stores last temporary files and lock information foreach my $pid (keys %cur_temp_info) { &store_temporary_and_lock_infos($pid); } # Stores last cancelled queries information foreach my $pid (keys %cur_cancel_info) { &store_temporary_and_lock_infos($pid); } # Stores last temporary files and lock information foreach my $pid (keys %cur_lock_info) { &store_temporary_and_lock_infos($pid); } # Stores tsung sessions if ($extens eq 'tsung') { foreach my $pid (sort {$a <=> $b} keys %tsung_session) { &store_tsung_session($pid); } } # set path and create subdirectories if ($incr_date =~ /^(\d+)-(\d+)-(\d+)/) { mkdir("$outdir/$1") if (!-d "$outdir/$1"); mkdir("$outdir/$1/$2") if (!-d "$outdir/$1/$2"); mkdir("$outdir/$1/$2/$3") if (!-d "$outdir/$1/$2/$3"); } else { &logmsg('ERROR', "invalid incremental date: $incr_date, can not create subdirectories."); } my $bpath = $incr_date; $bpath =~ s/\-/\//g; # Mark this directory as needing a reindex if (open(my $out, '>>' , "$last_parsed.tmp")) { flock($out, 2) || return 1; print $out "$incr_date\n"; close($out); } else { &logmsg('ERROR', "can't save last parsed line into $last_parsed.tmp, $!"); } # Save binary data my $filenum = $$; $filenum++ while (-e "$outdir/$bpath/$incr_date-$filenum.bin"); my $fhb = new IO::File ">$outdir/$bpath/$incr_date-$filenum.bin"; if (not defined $fhb) { localdie("FATAL: can't write to $outdir/$bpath/$incr_date-$filenum.bin, $!\n"); } &dump_as_binary($fhb); $fhb->close; $incr_date = $cur_date; &init_stats_vars(); } } return 1; } # Display message following the log level sub logmsg { my ($level, $str) = @_; return if ($quiet && !$debug && ($level ne 'FATAL')); return if (!$debug && ($level eq 'DEBUG')); if ($level =~ /(\d+)/) { print STDERR "\t" x $1; } print STDERR "$level: $str\n"; } # Remove quote from alias for normalisation sub remove_alias { my $str = shift(); $str =~ s/'//gs; return $str; } # Normalize SQL queries by removing parameters sub normalize_query { my $orig_query = shift; return if (!$orig_query); # Remove comments /* ... */ if (!$keep_comments) { $orig_query =~ s/\/\*(.*?)\*\///gs; } # Keep case on object name between doublequote my %objnames = (); my $i = 0; while ($orig_query =~ s/("[^"]+")/%%OBJNAME$i%%/) { $objnames{$i} = $1; $i++; } # Set the entire query lowercase $orig_query = lc($orig_query); # Restore object name while ($orig_query =~ s/\%\%objname(\d+)\%\%/$objnames{$1}/gs) {}; %objnames = (); # Remove string content $orig_query =~ s/\\'//gs; $orig_query =~ s/'[^']*'/\?/gs; $orig_query =~ s/\?(\?)+/\?/gs; # Remove comments starting with -- if (!$keep_comments) { $orig_query =~ s/\s*--[^\n]+[\n]/\n/gs; } # Remove extra space, new line and tab characters by a single space $orig_query =~ s/\s+/ /gs; # Removed start of transaction if ($orig_query !~ /^\s*begin\s*;\s*$/) { $orig_query =~ s/^\s*begin\s*;\s*//gs } # Normalise alias with quote $orig_query =~ s/AS\s+"([^"]+)"/'AS "' . remove_alias($1) . '"'/eigs; # Remove NULL parameters $orig_query =~ s/=\s*null/= \?/gs; # remove temporary identifier between double quote my %identifiers = (); $i = 0; while ($orig_query =~ s/"([^"]+)"/\%sqlident$i\%/) { $identifiers{$i} = $1; $i++; } # Remove numbers $orig_query =~ s/([^a-z0-9_\$\-])-?\d+/$1\?/gs; # Remove hexadecimal numbers $orig_query =~ s/([^a-z_\$-])0x[0-9a-f]{1,10}/$1\?/gs; # Remove bind parameters $orig_query =~ s/\$\d+/\?/gs; # restore identifiers $orig_query =~ s/\%sqlident(\d+)\%/"$identifiers{$1}"/gs; # Remove IN values $orig_query =~ s/\bin\s*\([\'0x,\s\?]*\)/in (...)/gs; # Remove curor names in CURSOR and IN clauses $orig_query =~ s/\b(declare|in|deallocate|close)\s+"[^"]+"/$1 "..."/gs; # Normalise cursor name $orig_query =~ s/\bdeclare\s+[^"\s]+\s+cursor/declare "..." cursor/gs; $orig_query =~ s/\b(fetch\s+next\s+from)\s+[^\s]+/$1 "..."/gs; $orig_query =~ s/\b(deallocate|close)\s+[^"\s]+/$1 "..."/gs; # Remove any leading whitespace $orig_query =~ s/^\s+//; # Remove any trailing whitespace $orig_query =~ s/\s+$//; # Remove any whitespace before a semicolon $orig_query =~ s/\s+;/;/; return $orig_query; } sub anonymized_string { my ( $before, $original, $after, $cache ) = @_; # Prevent dates from being anonymized return $original if $original =~ m{\A\d\d\d\d[/:-]\d\d[/:-]\d\d\z}; return $original if $original =~ m{\A\d\d[/:-]\d\d[/:-]\d\d\d\d\z}; # Prevent dates format like DD/MM/YYYY HH24:MI:SS from being anonymized return $original if $original =~ m{ \A (?:FM|FX|TM)? (?: HH | HH12 | HH24 | MI | SS | MS | US | SSSS | AM | A\.M\. | am | a\.m\. | PM | P\.M\. | pm | p\.m\. | Y,YYY | YYYY | YYY | YY | Y | IYYY | IYY | IY | I | BC | B\.C\. | bc | b\.c\. | AD | A\.D\. | ad | a\.d\. | MONTH | Month | month | MON | Mon | mon | MM | DAY | Day | day | DY | Dy | dy | DDD | DD | D | W | WW | IW | CC | J | Q | RM | rm | TZ | tz | [\s/:-] )+ (?:TH|th|SP)? \z }; # Prevent interval from being anonymized return $original if ($before && ($before =~ /interval/i)); return $original if ($after && ($after =~ /^\)*::interval/i)); # Range of characters to use in anonymized strings my @chars = ( 'A' .. 'Z', 0 .. 9, 'a' .. 'z', '-', '_', '.' ); unless ( $cache->{ $original } ) { # Actual anonymized version generation $cache->{ $original } = join( '', map { $chars[ rand @chars ] } 1 .. 10 ); } return $cache->{ $original }; } sub anonymized_number { my ( $original, $cache ) = @_; # Range of number to use in anonymized strings my @numbers = ( 0 .. 9 ); unless ( $cache->{ $original } ) { # Actual anonymized version generation $cache->{ $original } = join( '', map { $numbers[ rand @numbers ] } 1 .. 4 ); } return $cache->{ $original }; } # Anonymize litteral in SQL queries by replacing parameters with fake values sub anonymize_query { my $orig_query = shift; return $orig_query if (!$orig_query || !$anonymize); # Variable to hold anonymized versions, so we can provide the same value # for the same input, within single query. my $anonymization_cache = {}; # Remove comments if (!$keep_comments) { $orig_query =~ s/\/\*(.*?)\*\///gs; } # Clean query $orig_query =~ s/\\'//g; $orig_query =~ s/('')+/\$EMPTYSTRING\$/g; # Anonymize each values $orig_query =~ s{ ([^\s\']+[\s\(]*) # before '([^']*)' # original ([\)]*::\w+)? # after }{$1 . "'" . anonymized_string($1, $2, $3, $anonymization_cache) . "'" . ($3||'')}xeg; $orig_query =~ s/\$EMPTYSTRING\$/''/gs; # obfuscate numbers too if this is not parameter indices ($1 ...) $anonymization_cache = {}; $orig_query =~ s{([^\$])\b(\d+)\b}{ $1 . anonymized_number($1, $anonymization_cache) }xeg; return $orig_query; } # Format numbers with comma for betterparam_cache reading sub comma_numbers { return 0 if ($#_ < 0); return 0 if (!$_[0]); my $text = reverse $_[0]; $text =~ s/(\d\d\d)(?=\d)(?!\d*\.)/$1$num_sep/g; return scalar reverse $text; } # Format numbers with comma for better reading sub pretty_print_size { my $val = shift; return 0 if (!$val); if ($val >= 1125899906842624) { $val = ($val / 1125899906842624); $val = sprintf("%0.2f", $val) . " PiB"; } elsif ($val >= 1099511627776) { $val = ($val / 1099511627776); $val = sprintf("%0.2f", $val) . " TiB"; } elsif ($val >= 1073741824) { $val = ($val / 1073741824); $val = sprintf("%0.2f", $val) . " GiB"; } elsif ($val >= 1048576) { $val = ($val / 1048576); $val = sprintf("%0.2f", $val) . " MiB"; } elsif ($val >= 1024) { $val = ($val / 1024); $val = sprintf("%0.2f", $val) . " KiB"; } else { $val = $val . " B"; } return $val; } # Format duration sub convert_time { my $time = shift; return '0ms' if (!$time); my $days = int($time / 86400000); $time -= ($days * 86400000); my $hours = int($time / 3600000); $time -= ($hours * 3600000); my $minutes = int($time / 60000); $time -= ($minutes * 60000); my $seconds = int($time / 1000); $time -= ($seconds * 1000); my $milliseconds = sprintf("%.3d", $time); $days = $days < 1 ? '' : $days . 'd'; $hours = $hours < 1 ? '' : $hours . 'h'; $minutes = $minutes < 1 ? '' : $minutes . 'm'; $seconds = $seconds < 1 ? '' : $seconds . 's'; $milliseconds = $milliseconds < 1 ? '' : $milliseconds . 'ms'; if ($days || $hours || $minutes) { $milliseconds = ''; } elsif ($seconds) { $milliseconds =~ s/\.\d+//; } $milliseconds =~ s/^[0]+// if ($milliseconds !~ /\./); $time = $days . $hours . $minutes . $seconds . $milliseconds; $time = '0ms' if ($time eq ''); return $time; } # Stores the top N queries generating the biggest temporary file sub set_top_tempfile_info { my ($curdb, $q, $sz, $date, $db, $user, $remote, $app, $info) = @_; push(@{$top_tempfile_info{$curdb}}, [($sz, $date, $q, $db, $user, $remote, $app, $info)]); my @tmp_top_tempfile_info = sort {$b->[0] <=> $a->[0]} @{$top_tempfile_info{$curdb}}; @{$top_tempfile_info{$curdb}} = (); for (my $i = 0; $i <= $#tmp_top_tempfile_info; $i++) { push(@{$top_tempfile_info{$curdb}}, $tmp_top_tempfile_info[$i]); last if ($i == $end_top); } } # Stores top N slowest sample queries sub set_top_prepare_bind_sample { my ($type, $q, $dt, $t, $param, $db, $user, $remote, $app) = @_; return if ($sample <= 0); if ($type eq 'prepare') { $prepare_info{$db}{$q}{samples}{$dt}{query} = $q; $prepare_info{$db}{$q}{samples}{$dt}{date} = $t; $prepare_info{$db}{$q}{samples}{$dt}{db} = $db; $prepare_info{$db}{$q}{samples}{$dt}{user} = $user; $prepare_info{$db}{$q}{samples}{$dt}{remote} = $remote; $prepare_info{$db}{$q}{samples}{$dt}{app} = $app; $prepare_info{$db}{$q}{samples}{$dt}{params} = $param; my $i = 1; foreach my $k (sort {$b <=> $a} keys %{$prepare_info{$db}{$q}{samples}}) { if ($i > $sample) { delete $prepare_info{$db}{$q}{samples}{$k}; } $i++; } } if ($type eq 'bind') { $bind_info{$db}{$q}{samples}{$dt}{query} = $q; $bind_info{$db}{$q}{samples}{$dt}{date} = $t; $bind_info{$db}{$q}{samples}{$dt}{db} = $db; $bind_info{$db}{$q}{samples}{$dt}{user} = $user; $bind_info{$db}{$q}{samples}{$dt}{remote} = $remote; $bind_info{$db}{$q}{samples}{$dt}{app} = $app; $bind_info{$db}{$q}{samples}{$dt}{params} = $param; my $i = 1; foreach my $k (sort {$b <=> $a} keys %{$bind_info{$db}{$q}{samples}}) { if ($i > $sample) { delete $bind_info{$db}{$q}{samples}{$k}; } $i++; } } } # Stores the top N queries cancelled sub set_top_cancelled_info { my ($curdb, $q, $sz, $date, $db, $user, $remote, $app) = @_; push(@{$top_cancelled_info{$curdb}}, [($sz, $date, $q, $db, $user, $remote, $app)]); my @tmp_top_cancelled_info = sort {$b->[0] <=> $a->[0]} @{$top_cancelled_info{$curdb}}; @{$top_cancelled_info{$curdb}} = (); for (my $i = 0; $i <= $#tmp_top_cancelled_info; $i++) { push(@{$top_cancelled_info{$curdb}}, $tmp_top_cancelled_info[$i]); last if ($i == $end_top); } } # Stores the top N queries waiting the most sub set_top_locked_info { my ($curdb, $q, $dt, $date, $db, $user, $remote, $app) = @_; push(@{$top_locked_info{$curdb}}, [($dt, $date, $q, $db, $user, $remote, $app)]); my @tmp_top_locked_info = sort {$b->[0] <=> $a->[0]} @{$top_locked_info{$curdb}}; @{$top_locked_info{$curdb}} = (); for (my $i = 0; $i <= $#tmp_top_locked_info; $i++) { push(@{$top_locked_info{$curdb}}, $tmp_top_locked_info[$i]); last if ($i == $end_top); } } # Stores the top N slowest queries sub set_top_slowest { my ($curdb, $q, $dt, $date, $db, $user, $remote, $app, $bind, $plan) = @_; push(@{$top_slowest{$curdb}}, [($dt, $date, $q, $db, $user, $remote, $app, $bind, $plan)]); my @tmp_top_slowest = sort {$b->[0] <=> $a->[0]} @{$top_slowest{$curdb}}; @{$top_slowest{$curdb}} = (); for (my $i = 0; $i <= $#tmp_top_slowest; $i++) { push(@{$top_slowest{$curdb}}, $tmp_top_slowest[$i]); last if ($i == $end_top); } } # Stores top N slowest sample queries sub set_top_sample { my ($curdb, $norm, $q, $dt, $date, $db, $user, $remote, $app, $bind, $plan) = @_; return if (!$norm || !$q || $sample <= 0); $normalyzed_info{$curdb}{$norm}{samples}{$dt}{query} = $q; $normalyzed_info{$curdb}{$norm}{samples}{$dt}{date} = $date; $normalyzed_info{$curdb}{$norm}{samples}{$dt}{db} = $db; $normalyzed_info{$curdb}{$norm}{samples}{$dt}{user} = $user; $normalyzed_info{$curdb}{$norm}{samples}{$dt}{remote} = $remote; $normalyzed_info{$curdb}{$norm}{samples}{$dt}{app} = $app; $normalyzed_info{$curdb}{$norm}{samples}{$dt}{bind} = $bind; $normalyzed_info{$curdb}{$norm}{samples}{$dt}{plan} = $plan; my $i = 1; foreach my $k (sort {$b <=> $a} keys %{$normalyzed_info{$curdb}{$norm}{samples}}) { if ($i > $sample) { delete $normalyzed_info{$curdb}{$norm}{samples}{$k}; } $i++; } } # Stores top N error sample queries sub set_top_error_sample { my ($curdb, $q, $date, $real_error, $detail, $context, $statement, $hint, $db, $user, $app, $remote, $sqlstate) = @_; $errors_code{$curdb}{$sqlstate}++ if ($sqlstate); # Stop when we have our number of samples if (!exists $error_info{$curdb}{$q}{date} || ($#{$error_info{$curdb}{$q}{date}}+1 < $sample)) { if ( ($q =~ /deadlock detected/) || ($real_error && !grep(/^\Q$real_error\E$/, @{$error_info{$curdb}{$q}{error}})) ) { push(@{$error_info{$curdb}{$q}{date}}, $date); push(@{$error_info{$curdb}{$q}{detail}}, $detail); push(@{$error_info{$curdb}{$q}{context}}, $context); push(@{$error_info{$curdb}{$q}{statement}}, $statement); push(@{$error_info{$curdb}{$q}{hint}}, $hint); push(@{$error_info{$curdb}{$q}{error}}, $real_error); push(@{$error_info{$curdb}{$q}{db}}, $db); push(@{$error_info{$curdb}{$q}{user}}, $user); push(@{$error_info{$curdb}{$q}{app}}, $app); push(@{$error_info{$curdb}{$q}{remote}}, $remote); push(@{$error_info{$curdb}{$q}{sqlstate}}, $sqlstate); } } } # Stores top N error sample from pgbouncer log sub pgb_set_top_error_sample { my ($q, $date, $real_error, $db, $user, $remote) = @_; # Stop when we have our number of samples if (!exists $pgb_error_info{$q}{date} || ($#{$pgb_error_info{$q}{date}} < $sample)) { push(@{$pgb_error_info{$q}{date}}, $date); push(@{$pgb_error_info{$q}{error}}, $real_error); push(@{$pgb_error_info{$q}{db}}, $db); push(@{$pgb_error_info{$q}{user}}, $user); push(@{$pgb_error_info{$q}{remote}}, $remote); } } sub get_log_limit { my $curdb = shift(); $overall_stat{$curdb}{'first_log_ts'} =~ /^(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/; my ($t_y, $t_mo, $t_d, $t_h, $t_mi, $t_s); if (!$log_timezone) { ($t_y, $t_mo, $t_d, $t_h, $t_mi, $t_s) = ($1, $2, $3, $4, $5, $6); } else { ($t_y, $t_mo, $t_d, $t_h, $t_mi, $t_s) = change_timezone($1, $2, $3, $4, $5, $6); } my $t_log_min = "$t_y-$t_mo-$t_d $t_h:$t_mi:$t_s"; $overall_stat{$curdb}{'last_log_ts'} =~ /^(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/; if (!$log_timezone) { ($t_y, $t_mo, $t_d, $t_h, $t_mi, $t_s) = ($1, $2, $3, $4, $5, $6); } else { ($t_y, $t_mo, $t_d, $t_h, $t_mi, $t_s) = change_timezone($1, $2, $3, $4, $5, $6); } my $t_log_max = "$t_y-$t_mo-$t_d $t_h:$t_mi:$t_s"; return ($t_log_min, $t_log_max); } sub dump_as_text { my $curdb = shift; # Global information my $curdate = localtime(time); my $fmt_nlines = &comma_numbers($overall_stat{nlines}{$curdb}); my $total_time = timestr($td); $total_time =~ s/^([\.0-9]+) wallclock.*/$1/; $total_time = &convert_time($total_time * 1000); my $logfile_str = $log_files[0]; if ($#log_files > 0) { $logfile_str .= ', ..., ' . $log_files[-1]; } # Set logs limits my ($t_log_min, $t_log_max) = get_log_limit($curdb); print $fh qq{ pgBadger :: $report_title - Global information --------------------------------------------------- Generated on $curdate Log file: $logfile_str Parsed $fmt_nlines log entries in $total_time Log start from $t_log_min to $t_log_max }; # Dump normalized queries only if requested if ($dump_normalized_only) { if (!$query_numbering) { print $fh "Count\t\tQuery\n"; print $fh '-'x70,"\n"; } else { print $fh "#Num\tCount\t\tQuery\n"; print $fh '-'x80,"\n"; } foreach my $q (sort { $normalyzed_info{$curdb}{$b}{count} <=> $normalyzed_info{$curdb}{$a}{count} } keys %{$normalyzed_info{$curdb}}) { print $fh $query_numbering++, "\t" if ($query_numbering); print $fh "$normalyzed_info{$curdb}{$q}{count}\t$q\n"; } print $fh "\n\n"; print $fh "Report generated by pgBadger $VERSION ($project_url).\n"; return; } # Overall statistics my $fmt_unique = &comma_numbers(scalar keys %{$normalyzed_info{$curdb}}); my $fmt_queries = &comma_numbers($overall_stat{$curdb}{'queries_number'}); my $fmt_duration = &convert_time($overall_stat{$curdb}{'queries_duration'}{'execute'}+($overall_stat{$curdb}{'queries_duration'}{'prepare'}||0)+($overall_stat{$curdb}{'queries_duration'}{'bind'}||0)); $overall_stat{$curdb}{'first_query_ts'} ||= '-'; $overall_stat{$curdb}{'last_query_ts'} ||= '-'; print $fh qq{ - Overall statistics --------------------------------------------------- Number of unique normalized queries: $fmt_unique Number of queries: $fmt_queries Total query duration: $fmt_duration First query: $overall_stat{$curdb}{'first_query_ts'} Last query: $overall_stat{$curdb}{'last_query_ts'} }; foreach (sort {$overall_stat{$curdb}{'peak'}{$b}{query} <=> $overall_stat{$curdb}{'peak'}{$a}{query}} keys %{$overall_stat{$curdb}{'peak'}}) { print $fh "Query peak: ", &comma_numbers($overall_stat{$curdb}{'peak'}{$_}{query}), " queries/s at $_"; last; } if (!$disable_error) { my $fmt_errors = &comma_numbers($overall_stat{$curdb}{'errors_number'}); my $fmt_unique_error = &comma_numbers(scalar keys %{$error_info{$curdb}}); print $fh qq{ Number of events: $fmt_errors Number of unique normalized events: $fmt_unique_error }; } if ($tempfile_info{$curdb}{count}) { my $fmt_temp_maxsise = &pretty_print_size($tempfile_info{$curdb}{maxsize}); my $fmt_temp_avsize = &pretty_print_size(sprintf("%.2f", ($tempfile_info{$curdb}{size} / $tempfile_info{$curdb}{count}))); print $fh qq{Number temporary files: $tempfile_info{$curdb}{count} Max size of temporary files: $fmt_temp_maxsise Average size of temporary files: $fmt_temp_avsize }; } if ($cancelled_info{$curdb}{count}) { print $fh qq{Number cancelled queries: $cancelled_info{$curdb}{count} }; } if (!$disable_session && $session_info{$curdb}{count}) { my $avg_session_duration = &convert_time($session_info{$curdb}{duration} / $session_info{$curdb}{count}); my $tot_session_duration = &convert_time($session_info{$curdb}{duration}); my $avg_queries = &comma_numbers(int($overall_stat{$curdb}{'queries_number'}/$session_info{$curdb}{count})); my $q_duration = $overall_stat{$curdb}{'queries_duration'}{'execute'}+($overall_stat{$curdb}{'queries_duration'}{'prepare'}||0)+($overall_stat{$curdb}{'queries_duration'}{'bind'}||0); my $avg_duration = &convert_time(int($q_duration/$session_info{$curdb}{count})); my $avg_idle_time = &convert_time( ($session_info{$curdb}{duration} - $q_duration) / ($session_info{$curdb}{count} || 1) ); $avg_idle_time = 'n/a' if (!$session_info{$curdb}{count}); print $fh qq{Total number of sessions: $session_info{$curdb}{count} Total duration of sessions: $tot_session_duration Average duration of sessions: $avg_session_duration Average queries per sessions: $avg_queries Average queries duration per sessions: $avg_duration Average idle time per session: $avg_idle_time }; foreach (sort {$overall_stat{$curdb}{'peak'}{$b}{session} <=> $overall_stat{$curdb}{'peak'}{$a}{session}} keys %{$overall_stat{$curdb}{'peak'}}) { next if (!$session_info{$curdb}{count}); print $fh "Session peak: ", &comma_numbers($overall_stat{$curdb}{'peak'}{$_}{session}), " sessions at $_"; last; } } if (!$disable_connection && $connection_info{$curdb}{count}) { print $fh "Total number of connections: $connection_info{$curdb}{count}\n"; foreach (sort {$overall_stat{$curdb}{'peak'}{$b}{connection} <=> $overall_stat{$curdb}{'peak'}{$a}{connection}} keys %{$overall_stat{$curdb}{'peak'}}) { if ($overall_stat{$curdb}{'peak'}{$_}{connection} > 0) { print $fh "Connection peak: ", &comma_numbers($overall_stat{$curdb}{'peak'}{$_}{connection}), " conn/s at $_"; } last; } } if (scalar keys %{$database_info{$curdb}} > 1) { print $fh "Total number of databases: ", scalar keys %{$database_info{$curdb}}, "\n"; } if (!$disable_hourly && $overall_stat{$curdb}{'queries_number'}) { print $fh qq{ - Hourly statistics ---------------------------------------------------- Report not supported by text format }; } # INSERT/DELETE/UPDATE/SELECT repartition my $totala = 0; foreach my $a (@SQL_ACTION) { $totala += $overall_stat{$curdb}{lc($a)}; } if (!$disable_type && $totala) { my $total = $overall_stat{$curdb}{'queries_number'} || 1; print $fh "\n- Queries by type ------------------------------------------------------\n\n"; print $fh "Type Count Percentage\n"; foreach my $a (@SQL_ACTION) { print $fh "$a: ", &comma_numbers($overall_stat{$curdb}{lc($a)}), " ", sprintf("%0.2f", ($overall_stat{$curdb}{lc($a)} * 100) / $total), "%\n"; } print $fh "OTHERS: ", &comma_numbers($total - $totala), " ", sprintf("%0.2f", (($total - $totala) * 100) / $total), "%\n" if (($total - $totala) > 0); print $fh "\n"; # Show request per database statistics if (scalar keys %{$database_info{$curdb}} > 1) { print $fh "\n- Request per database ------------------------------------------------------\n\n"; print $fh "Database Request type Count Duration\n"; foreach my $d (sort keys %{$database_info{$curdb}}) { print $fh "$d - ", &comma_numbers($database_info{$curdb}{$d}{count}), " ", &convert_time($database_info{$curdb}{$d}{duration}), "\n"; foreach my $r (sort keys %{$database_info{$curdb}{$d}}) { next if (($r eq 'count') || ($r =~ /duration/)); print $fh "\t$r ", &comma_numbers($database_info{$curdb}{$d}{$r}), " ", &convert_time($database_info{$curdb}{$d}{"$r|duration"}), "\n"; } } } # Show request per application statistics if (scalar keys %application_info > 1) { print $fh "\n- Request per application ------------------------------------------------------\n\n"; print $fh "Application Request type Count Duration\n"; foreach my $d (sort keys %{$application_info{$curdb}}) { print $fh "$d - ", &comma_numbers($application_info{$curdb}{$d}{count}), " ", &convert_time($application_info{$curdb}{$d}{duration}), "\n"; foreach my $r (sort keys %{$application_info{$curdb}{$d}}) { next if (($r eq 'count') || ($r =~ /duration/)); print $fh "\t$r ", &comma_numbers($application_info{$curdb}{$d}{$r}), " ", &convert_time($application_info{$curdb}{$d}{"$r|duration"}), "\n"; } } } # Show request per user statistics if (scalar keys %{$user_info{$curdb}} > 1) { print $fh "\n- Request per user ------------------------------------------------------\n\n"; print $fh "User Request type Count duration\n"; foreach my $d (sort keys %{$user_info{$curdb}}) { print $fh "$d - ", &comma_numbers($user_info{$curdb}{$d}{count}), " ", &convert_time($user_info{$curdb}{$d}{duration}), "\n"; foreach my $r (sort keys %{$user_info{$curdb}{$d}}) { next if (($r eq 'count') || ($r =~ /duration/)); print $fh "\t$r ", &comma_numbers($user_info{$curdb}{$d}{$r}), " ", &convert_time($user_info{$curdb}{$d}{"$r|duration"}), "\n"; } } } # Show request per host statistics if (scalar keys %{$host_info{$curdb}} > 1) { print $fh "\n- Request per host ------------------------------------------------------\n\n"; print $fh "Host Request type Count Duration\n"; foreach my $d (sort keys %{$host_info{$curdb}}) { print $fh "$d - ", &comma_numbers($host_info{$curdb}{$d}{count}), " ", &convert_time($host_info{$curdb}{$d}{duration}), "\n"; foreach my $r (sort keys %{$host_info{$curdb}{$d}}) { next if (($r eq 'count') || ($r =~ /duration/)); print $fh "\t$r ", &comma_numbers($host_info{$curdb}{$d}{$r}), " ", &convert_time($host_info{$curdb}{$d}{"$r|duration"}), "\n"; } } } } if (!$disable_lock && scalar keys %{$lock_info{$curdb}} > 0) { print $fh "\n- Locks by type ------------------------------------------------------\n\n"; print $fh "Type Object Count Total Duration Avg duration (s)\n"; my $total_count = 0; my $total_duration = 0; foreach my $t (sort keys %{$lock_info{$curdb}}) { print $fh "$t\t\t", &comma_numbers($lock_info{$curdb}{$t}{count}), " ", &convert_time($lock_info{$curdb}{$t}{duration}), " ", &convert_time($lock_info{$curdb}{$t}{duration} / $lock_info{$curdb}{$t}{count}), "\n"; foreach my $o (sort keys %{$lock_info{$curdb}{$t}}) { next if (($o eq 'count') || ($o eq 'duration') || ($o eq 'chronos')); print $fh "\t$o\t", &comma_numbers($lock_info{$curdb}{$t}{$o}{count}), " ", &convert_time($lock_info{$curdb}{$t}{$o}{duration}), " ", &convert_time($lock_info{$curdb}{$t}{$o}{duration} / $lock_info{$curdb}{$t}{$o}{count}), "\n"; } $total_count += $lock_info{$curdb}{$t}{count}; $total_duration += $lock_info{$curdb}{$t}{duration}; } print $fh "Total:\t\t\t", &comma_numbers($total_count), " ", &convert_time($total_duration), " ", &convert_time($total_duration / ($total_count || 1)), "\n"; } # Show session per database statistics if (!$disable_session && exists $session_info{$curdb}{database}) { print $fh "\n- Sessions per database ------------------------------------------------------\n\n"; print $fh "Database Count Total Duration Avg duration (s)\n"; foreach my $d (sort keys %{$session_info{$curdb}{database}}) { print $fh "$d - ", &comma_numbers($session_info{$curdb}{database}{$d}{count}), " ", &convert_time($session_info{$curdb}{database}{$d}{duration}), " ", &convert_time($session_info{$curdb}{database}{$d}{duration} / $session_info{$curdb}{database}{$d}{count}), "\n"; } } # Show session per user statistics if (!$disable_session && exists $session_info{$curdb}{user}) { print $fh "\n- Sessions per user ------------------------------------------------------\n\n"; print $fh "User Count Total Duration Avg duration (s)\n"; foreach my $d (sort keys %{$session_info{$curdb}{user}}) { print $fh "$d - ", &comma_numbers($session_info{$curdb}{user}{$d}{count}), " ", &convert_time($session_info{$curdb}{user}{$d}{duration}), " ", &convert_time($session_info{$curdb}{user}{$d}{duration} / $session_info{$curdb}{user}{$d}{count}), "\n"; } } # Show session per host statistics if (!$disable_session && exists $session_info{$curdb}{host}) { print $fh "\n- Sessions per host ------------------------------------------------------\n\n"; print $fh "User Count Total Duration Avg duration (s)\n"; foreach my $d (sort keys %{$session_info{$curdb}{host}}) { print $fh "$d - ", &comma_numbers($session_info{$curdb}{host}{$d}{count}), " ", &convert_time($session_info{$curdb}{host}{$d}{duration}), " ", &convert_time($session_info{$curdb}{host}{$d}{duration} / $session_info{$curdb}{host}{$d}{count}), "\n"; } } # Show session per application statistics if (!$disable_session && exists $session_info{$curdb}{app}) { print $fh "\n- Sessions per application ------------------------------------------------------\n\n"; print $fh "Application Count Total Duration Avg duration (s)\n"; foreach my $d (sort keys %{$session_info{$curdb}{app}}) { print $fh "$d - ", &comma_numbers($session_info{$curdb}{app}{$d}{count}), " ", &convert_time($session_info{$curdb}{app}{$d}{duration}), " ", &convert_time($session_info{$curdb}{app}{$d}{duration} / $session_info{$curdb}{app}{$d}{count}), "\n"; } } # Show connection per database statistics if (!$disable_connection && exists $connection_info{$curdb}{database}) { print $fh "\n- Connections per database ------------------------------------------------------\n\n"; print $fh "Database User Count\n"; foreach my $d (sort keys %{$connection_info{$curdb}{database}}) { print $fh "$d - ", &comma_numbers($connection_info{$curdb}{database}{$d}), "\n"; foreach my $u (sort keys %{$connection_info{$curdb}{user}}) { next if (!exists $connection_info{$curdb}{database_user}{$d}{$u}); print $fh "\t$u ", &comma_numbers($connection_info{$curdb}{database_user}{$d}{$u}), "\n"; } } print $fh "\nDatabase Host Count\n"; foreach my $d (sort keys %{$connection_info{$curdb}{database}}) { print $fh "$d - ", &comma_numbers($connection_info{$curdb}{database}{$d}), "\n"; foreach my $u (sort keys %{$connection_info{$curdb}{host}}) { next if (!exists $connection_info{$curdb}{database_host}{$d}{$u}); print $fh "\t$u ", &comma_numbers($connection_info{$curdb}{database_host}{$d}{$u}), "\n"; } } } # Show connection per user statistics if (!$disable_connection && exists $connection_info{$curdb}{user}) { print $fh "\n- Connections per user ------------------------------------------------------\n\n"; print $fh "User Count\n"; foreach my $d (sort keys %{$connection_info{$curdb}{user}}) { print $fh "$d - ", &comma_numbers($connection_info{$curdb}{user}{$d}), "\n"; } } # Show connection per host statistics if (!$disable_connection && exists $connection_info{$curdb}{host}) { print $fh "\n- Connections per host ------------------------------------------------------\n\n"; print $fh "Host Count\n"; foreach my $d (sort keys %{$connection_info{$curdb}{host}}) { print $fh "$d - ", &comma_numbers($connection_info{$curdb}{host}{$d}), "\n"; } } # Show lock wait detailed information if (!$disable_lock && scalar keys %{$lock_info{$curdb}} > 0) { my @top_locked_queries = (); foreach my $h (keys %{$normalyzed_info{$curdb}}) { if (exists($normalyzed_info{$curdb}{$h}{locks})) { push (@top_locked_queries, [$h, $normalyzed_info{$curdb}{$h}{locks}{count}, $normalyzed_info{$curdb}{$h}{locks}{wait}, $normalyzed_info{$curdb}{$h}{locks}{minwait}, $normalyzed_info{$curdb}{$h}{locks}{maxwait}]); } } # Most frequent waiting queries (N) @top_locked_queries = sort {$b->[2] <=> $a->[2]} @top_locked_queries; print $fh "\n- Most frequent waiting queries (N) -----------------------------------------\n\n"; print $fh "Rank Count Total wait time (s) Min/Max/Avg duration (s) Query\n"; for (my $i = 0 ; $i <= $#top_locked_queries; $i++) { last if ($i > $end_top); print $fh ($i + 1), ") ", $top_locked_queries[$i]->[1], " - ", &convert_time($top_locked_queries[$i]->[2]), " - ", &convert_time($top_locked_queries[$i]->[3]), "/", &convert_time($top_locked_queries[$i]->[4]), "/", &convert_time(($top_locked_queries[$i]->[2] / $top_locked_queries[$i]->[1])), " - ", $top_locked_queries[$i]->[0], "\n"; print $fh "--\n"; my $k = $top_locked_queries[$i]->[0]; my $j = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$curdb}{$k}{samples}}) { last if ($j > $sample); my $ttl = $top_locked_queries[$i]->[1] || ''; my $db = ''; $db .= " - $normalyzed_info{$curdb}{$k}{samples}{$d}{date} - database: $normalyzed_info{$curdb}{$k}{samples}{$d}{db}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{db}); $db .= ", user: $normalyzed_info{$curdb}{$k}{samples}{$d}{user}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{user}); $db .= ", remote: $normalyzed_info{$curdb}{$k}{samples}{$d}{remote}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{remote}); $db .= ", app: $normalyzed_info{$curdb}{$k}{samples}{$d}{app}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{app}); $db .= ", bind query: yes" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{bind}); $db =~ s/^, / - /; print $fh "\t- Example $j: ", &convert_time($d), "$db - ", &anonymize_query($normalyzed_info{$curdb}{$k}{samples}{$d}{query}), "\n"; $j++; } } print $fh "\n"; @top_locked_queries = (); # Queries that waited the most @{$top_locked_info{$curdb}} = sort {$b->[1] <=> $a->[1]} @{$top_locked_info{$curdb}}; print $fh "\n- Queries that waited the mosts ---------------------------------------------\n\n"; print $fh "Rank Wait time (s) Query\n"; for (my $i = 0 ; $i <= $#{$top_locked_info{$curdb}} ; $i++) { my $ttl = $top_locked_info{$curdb}[$i]->[1] || ''; my $db = ''; $db .= " - database: $top_locked_info{$curdb}[$i]->[3]" if ($top_locked_info{$curdb}[$i]->[3]); $db .= ", user: $top_locked_info{$curdb}[$i]->[4]" if ($top_locked_info{$curdb}[$i]->[4]); $db .= ", remote: $top_locked_info{$curdb}[$i]->[5]" if ($top_locked_info{$curdb}[$i]->[5]); $db .= ", app: $top_locked_info{$curdb}[$i]->[6]" if ($top_locked_info{$curdb}[$i]->[6]); $db =~ s/^, / - /; print $fh ($i + 1), ") ", &convert_time($top_locked_info{$curdb}[$i]->[0]), " $ttl$db - ", &anonymize_query($top_locked_info{$curdb}[$i]->[2]), "\n"; print $fh "--\n"; } print $fh "\n"; } # Show temporary files detailed information if (!$disable_temporary && scalar keys %{$tempfile_info{$curdb}} > 0) { my @top_temporary = (); foreach my $h (keys %{$normalyzed_info{$curdb}}) { if (exists($normalyzed_info{$curdb}{$h}{tempfiles})) { push (@top_temporary, [$h, $normalyzed_info{$curdb}{$h}{tempfiles}{count}, $normalyzed_info{$curdb}{$h}{tempfiles}{size}, $normalyzed_info{$curdb}{$h}{tempfiles}{minsize}, $normalyzed_info{$curdb}{$h}{tempfiles}{maxsize}]); } } # Queries generating the most temporary files (N) @top_temporary = sort {$b->[1] <=> $a->[1]} @top_temporary; print $fh "\n- Queries generating the most temporary files (N) ---------------------------\n\n"; print $fh "Rank Count Total size Min/Max/Avg size Query\n"; my $idx = 1; for (my $i = 0 ; $i <= $#top_temporary ; $i++) { last if ($i > $end_top); print $fh $idx, ") ", $top_temporary[$i]->[1], " - ", &comma_numbers($top_temporary[$i]->[2]), " - ", &comma_numbers($top_temporary[$i]->[3]), "/", &comma_numbers($top_temporary[$i]->[4]), "/", &comma_numbers(sprintf("%.2f", $top_temporary[$i]->[2] / $top_temporary[$i]->[1])), " - ", &anonymize_query($top_temporary[$i]->[0]), "\n"; print $fh "--\n"; my $k = $top_temporary[$i]->[0]; if (scalar keys %{$normalyzed_info{$curdb}{$k}{samples}}) { my $j = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$curdb}{$k}{samples}}) { last if ($j > $sample); my $db = ''; $db .= "$normalyzed_info{$curdb}{$k}{samples}{$d}{date} - database: $normalyzed_info{$curdb}{$k}{samples}{$d}{db}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{db}); $db .= ", user: $normalyzed_info{$curdb}{$k}{samples}{$d}{user}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{user}); $db .= ", remote: $normalyzed_info{$curdb}{$k}{samples}{$d}{remote}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{remote}); $db .= ", app: $normalyzed_info{$curdb}{$k}{samples}{$d}{app}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{app}); $db .= ", bind query: yes" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{bind}); $db =~ s/^, / - /; print $fh "\t- Example $j: ", &convert_time($d), " - $db - ", &anonymize_query($normalyzed_info{$curdb}{$k}{samples}{$d}{query}), "\n"; $j++; } } $idx++; } @top_temporary = (); # Top queries generating the largest temporary files @{$top_tempfile_info{$curdb}} = sort {$b->[1] <=> $a->[1]} @{$top_tempfile_info{$curdb}}; print $fh "\n- Queries generating the largest temporary files ----------------------------\n\n"; print $fh "Rank Size Query\n"; for (my $i = 0 ; $i <= $#{$top_tempfile_info{$curdb}} ; $i++) { my $ttl = $top_tempfile_info{$curdb}[$i]->[1] || ''; my $db = ''; $db .= " - database: $top_tempfile_info{$curdb}[$i]->[3]" if ($top_tempfile_info{$curdb}[$i]->[3]); $db .= ", user: $top_tempfile_info{$curdb}[$i]->[4]" if ($top_tempfile_info{$curdb}[$i]->[4]); $db .= ", remote: $top_tempfile_info{$curdb}[$i]->[5]" if ($top_tempfile_info{$curdb}[$i]->[5]); $db .= ", app: $top_tempfile_info{$curdb}[$i]->[6]" if ($top_tempfile_info{$curdb}[$i]->[6]); $db .= ", info: $top_tempfile_info{$curdb}[$i]->[7]" if ($top_tempfile_info{$curdb}[$i]->[7]); $db =~ s/^, / - /; print $fh ($i + 1), ") ", &comma_numbers($top_tempfile_info{$curdb}[$i]->[0]), " - $ttl$db - ", &anonymize_query($top_tempfile_info{$curdb}[$i]->[2]), "\n"; } print $fh "\n"; } # Show cancelled queries detailed information if (!$disable_query && scalar keys %{$cancelled_info{$curdb}} > 0) { my @top_cancelled = (); foreach my $h (keys %{$normalyzed_info{$curdb}}) { if (exists($normalyzed_info{$curdb}{$h}{cancelled})) { push (@top_cancelled, [$h, $normalyzed_info{$curdb}{$h}{cancelled}{count}]); } } # Queries generating the most cancelled files (N) @top_cancelled = sort {$b->[1] <=> $a->[1]} @top_cancelled; print $fh "\n- Queries most cancelled (N) ---------------------------\n\n"; print $fh "Rank Count Query\n"; my $idx = 1; for (my $i = 0 ; $i <= $#top_cancelled ; $i++) { last if ($i > $end_top); print $fh $idx, ") ", $top_cancelled[$i]->[1], " - ", $top_cancelled[$i]->[0], "\n"; print $fh "--\n"; my $k = $top_cancelled[$i]->[0]; if (scalar keys %{$normalyzed_info{$curdb}{$k}{samples}}) { my $j = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$curdb}{$k}{samples}}) { last if ($j > $sample); my $db = ''; $db .= "$normalyzed_info{$curdb}{$k}{samples}{$d}{date} - database: $normalyzed_info{$curdb}{$k}{samples}{$d}{db}" if ($normalyzed_info{$k}{samples}{$d}{db}); $db .= ", user: $normalyzed_info{$curdb}{$k}{samples}{$d}{user}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{user}); $db .= ", remote: $normalyzed_info{$curdb}{$k}{samples}{$d}{remote}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{remote}); $db .= ", app: $normalyzed_info{$curdb}{$k}{samples}{$d}{app}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{app}); $db .= ", bind query: yes" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{bind}); $db =~ s/^, / - /; print $fh "\t- Example $j: ", &convert_time($d), " - $db - ", &anonymize_query($normalyzed_info{$curdb}{$k}{samples}{$d}{query}), "\n"; $j++; } } $idx++; } @top_cancelled = (); # Top queries generating the largest cancelled files @{$top_cancelled_info{$curdb}} = sort {$b->[1] <=> $a->[1]} @{$top_cancelled_info{$curdb}}; print $fh "\n- Queries generating the most cancellation ----------------------------\n\n"; print $fh "Rank Times cancelled Query\n"; for (my $i = 0 ; $i <= $#{$top_cancelled_info{$curdb}} ; $i++) { my $ttl = $top_cancelled_info{$curdb}[$i]->[1] || ''; my $db = ''; $db .= " - database: $top_cancelled_info{$curdb}[$i]->[3]" if ($top_cancelled_info{$curdb}[$i]->[3]); $db .= ", user: $top_cancelled_info{$curdb}[$i]->[4]" if ($top_cancelled_info{$curdb}[$i]->[4]); $db .= ", remote: $top_cancelled_info{$curdb}[$i]->[5]" if ($top_cancelled_info{$curdb}[$i]->[5]); $db .= ", app: $top_cancelled_info{$curdb}[$i]->[6]" if ($top_cancelled_info{$curdb}[$i]->[6]); $db =~ s/^, / - /; print $fh ($i + 1), ") ", &comma_numbers($top_cancelled_info{$curdb}[$i]->[0]), " - $ttl$db - ", &anonymize_query($top_cancelled_info{$curdb}[$i]->[2]), "\n"; } print $fh "\n"; } # Show top information if (!$disable_query && ($#{$top_slowest{$curdb}} >= 0)) { print $fh "\n- Slowest queries ------------------------------------------------------\n\n"; print $fh "Rank Duration (s) Query\n"; for (my $i = 0 ; $i <= $#{$top_slowest{$curdb}} ; $i++) { my $db = ''; $db .= " database: $top_slowest{$curdb}[$i]->[3]" if ($top_slowest{$curdb}[$i]->[3]); $db .= ", user: $top_slowest{$curdb}[$i]->[4]" if ($top_slowest{$curdb}[$i]->[4]); $db .= ", remote: $top_slowest{$curdb}[$i]->[5]" if ($top_slowest{$curdb}[$i]->[5]); $db .= ", app: $top_slowest{$curdb}[$i]->[6]" if ($top_slowest{$curdb}[$i]->[6]); $db .= ", bind query: yes" if ($top_slowest{$curdb}[$i]->[7]); $db =~ s/^, //; print $fh $i + 1, ") " . &convert_time($top_slowest{$curdb}[$i]->[0]) . "$db - " . &anonymize_query($top_slowest{$curdb}[$i]->[2]) . "\n"; print $fh "--\n"; } print $fh "\n- Queries that took up the most time (N) -------------------------------\n\n"; print $fh "Rank Total duration Times executed Min/Max/Avg duration (s) Query\n"; my $idx = 1; foreach my $k (sort {$normalyzed_info{$curdb}{$b}{duration} <=> $normalyzed_info{$curdb}{$a}{duration}} keys %{$normalyzed_info{$curdb}}) { next if (!$normalyzed_info{$curdb}{$k}{count}); last if ($idx > $top); my $q = $k; if ($normalyzed_info{$curdb}{$k}{count} == 1) { foreach (keys %{$normalyzed_info{$curdb}{$k}{samples}}) { $q = $normalyzed_info{$curdb}{$k}{samples}{$_}{query}; last; } } $q = &anonymize_query($q); $normalyzed_info{$curdb}{$k}{average} = $normalyzed_info{$curdb}{$k}{duration} / $normalyzed_info{$curdb}{$k}{count}; print $fh "$idx) " . &convert_time($normalyzed_info{$curdb}{$k}{duration}) . " - " . &comma_numbers($normalyzed_info{$curdb}{$k}{count}) . " - " . &convert_time($normalyzed_info{$curdb}{$k}{min}) . "/" . &convert_time($normalyzed_info{$curdb}{$k}{max}) . "/" . &convert_time($normalyzed_info{$curdb}{$k}{average}) . " - $q\n"; print $fh "--\n"; my $j = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$curdb}{$k}{samples}}) { last if ($j > $sample); my $db = ''; $db .= " - database: $normalyzed_info{$curdb}{$k}{samples}{$d}{db}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{db}); $db .= ", user: $normalyzed_info{$curdb}{$k}{samples}{$d}{user}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{user}); $db .= ", remote: $normalyzed_info{$curdb}{$k}{samples}{$d}{remote}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{remote}); $db .= ", app: $normalyzed_info{$curdb}{$k}{samples}{$d}{app}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{app}); $db .= ", bind query: yes" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{bind}); $db =~ s/^, / - /; print $fh "\t- Example $j: ", &convert_time($d), "$db - ", &anonymize_query($normalyzed_info{$curdb}{$k}{samples}{$d}{query}), "\n"; $j++; } $idx++; } } if (!$disable_query && (scalar keys %{$normalyzed_info{$curdb}} > 0)) { print $fh "\n- Most frequent queries (N) --------------------------------------------\n\n"; print $fh "Rank Times executed Total duration Min/Max/Avg duration (s) Query\n"; my $idx = 1; foreach my $k (sort {$normalyzed_info{$curdb}{$b}{count} <=> $normalyzed_info{$curdb}{$a}{count}} keys %{$normalyzed_info{$curdb}}) { next if (!$normalyzed_info{$curdb}{$k}{count}); last if ($idx > $top); my $q = $k; if ($normalyzed_info{$curdb}{$k}{count} == 1) { foreach (keys %{$normalyzed_info{$curdb}{$k}{samples}}) { $q = $normalyzed_info{$curdb}{$k}{samples}{$_}{query}; last; } } $q = &anonymize_query($q); print $fh "$idx) " . &comma_numbers($normalyzed_info{$curdb}{$k}{count}) . " - " . &convert_time($normalyzed_info{$curdb}{$k}{duration}) . " - " . &convert_time($normalyzed_info{$curdb}{$k}{min}) . "/" . &convert_time($normalyzed_info{$curdb}{$k}{max}) . "/" . &convert_time($normalyzed_info{$curdb}{$k}{duration} / $normalyzed_info{$curdb}{$k}{count}) . " - $q\n"; print $fh "--\n"; my $i = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$curdb}{$k}{samples}}) { last if ($i > $sample); my $db = ''; $db .= " - database: $normalyzed_info{$curdb}{$k}{samples}{$d}{db}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{db}); $db .= ", user: $normalyzed_info{$curdb}{$k}{samples}{$d}{user}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{user}); $db .= ", remote: $normalyzed_info{$curdb}{$k}{samples}{$d}{remote}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{remote}); $db .= ", app: $normalyzed_info{$curdb}{$k}{samples}{$d}{app}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{app}); $db .= ", bind query: yes" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{bind}); $db =~ s/^, / - /; print $fh "\tExample $i: ", &convert_time($d), "$db - ", &anonymize_query($normalyzed_info{$curdb}{$k}{samples}{$d}{query}), "\n"; $i++; } $idx++; } } if (!$disable_query && ($#{$top_slowest{$curdb}} >= 0)) { print $fh "\n- Slowest queries (N) --------------------------------------------------\n\n"; print $fh "Rank Min/Max/Avg duration (s) Times executed Total duration Query\n"; my $idx = 1; foreach my $k (sort {$normalyzed_info{$curdb}{$b}{average} <=> $normalyzed_info{$curdb}{$a}{average}} keys %{$normalyzed_info{$curdb}}) { next if (!$normalyzed_info{$curdb}{$k}{count}); last if ($idx > $top); my $q = $k; if ($normalyzed_info{$curdb}{$k}{count} == 1) { foreach (keys %{$normalyzed_info{$curdb}{$k}{samples}}) { $q = $normalyzed_info{$curdb}{$k}{samples}{$_}{query}; last; } } $q = &anonymize_query($q); print $fh "$idx) " . &convert_time($normalyzed_info{$curdb}{$k}{min}) . "/" . &convert_time($normalyzed_info{$curdb}{$k}{max}) . "/" . &convert_time($normalyzed_info{$curdb}{$k}{average}) . " - " . &comma_numbers($normalyzed_info{$curdb}{$k}{count}) . " - " . &convert_time($normalyzed_info{$curdb}{$k}{duration}) . " - $q\n"; print $fh "--\n"; my $i = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$curdb}{$k}{samples}}) { last if ($i > $sample); my $db = ''; $db .= " - database: $normalyzed_info{$curdb}{$k}{samples}{$d}{db}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{db}); $db .= ", user: $normalyzed_info{$curdb}{$k}{samples}{$d}{user}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{user}); $db .= ", remote: $normalyzed_info{$curdb}{$k}{samples}{$d}{remote}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{remote}); $db .= ", app: $normalyzed_info{$curdb}{$k}{samples}{$d}{app}" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{app}); $db .= ", bind query: yes" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{yes}); $db =~ s/^, / - /; print $fh "\tExample $i: ", &convert_time($d), "$db - ", &anonymize_query($normalyzed_info{$curdb}{$k}{samples}{$d}{query}), "\n"; $i++; } $idx++; } } @{$top_slowest{$curdb}} = (); if (!$disable_error) { &show_error_as_text($curdb); } # Show pgbouncer session per database statistics if (exists $pgb_session_info{database}) { print $fh "\n- pgBouncer sessions per database --------------------------------------------\n\n"; print $fh "Database Count Total Duration Avg duration (s)\n"; foreach my $d (sort keys %{$pgb_session_info{database}}) { print $fh "$d - ", &comma_numbers($pgb_session_info{database}{$d}{count}), " ", &convert_time($pgb_session_info{database}{$d}{duration}), " ", &convert_time($pgb_session_info{database}{$d}{duration} / $pgb_session_info{database}{$d}{count}), "\n"; } } # Show pgbouncer session per user statistics if (exists $pgb_session_info{user}) { print $fh "\n- pgBouncer sessions per user ------------------------------------------------\n\n"; print $fh "User Count Total Duration Avg duration (s)\n"; foreach my $d (sort keys %{$pgb_session_info{user}}) { print $fh "$d - ", &comma_numbers($pgb_session_info{user}{$d}{count}), " ", &convert_time($pgb_session_info{user}{$d}{duration}), " ", &convert_time($pgb_session_info{user}{$d}{duration} / $pgb_session_info{user}{$d}{count}), "\n"; } } # Show pgbouncer session per host statistics if (exists $pgb_session_info{host}) { print $fh "\n- pgBouncer sessions per host ------------------------------------------------\n\n"; print $fh "User Count Total Duration Avg duration (s)\n"; foreach my $d (sort keys %{$pgb_session_info{host}}) { print $fh "$d - ", &comma_numbers($pgb_session_info{host}{$d}{count}), " ", &convert_time($pgb_session_info{host}{$d}{duration}), " ", &convert_time($pgb_session_info{host}{$d}{duration} / $pgb_session_info{host}{$d}{count}), "\n"; } } # Show pgbouncer session per application statistics if (exists $pgb_session_info{app}) { print $fh "\n- pgBouncer sessions per application -----------------------------------------\n\n"; print $fh "Application Count Total Duration Avg duration (s)\n"; foreach my $d (sort keys %{$pgb_session_info{app}}) { print $fh "$d - ", &comma_numbers($pgb_session_info{app}{$d}{count}), " ", &convert_time($pgb_session_info{app}{$d}{duration}), " ", &convert_time($pgb_session_info{app}{$d}{duration} / $pgb_session_info{app}{$d}{count}), "\n"; } } # Show pgbouncer connection per database statistics if (exists $pgb_connection_info{database}) { print $fh "\n- pgBouncer connections per database -----------------------------------------\n\n"; print $fh "Database User Count\n"; foreach my $d (sort keys %{$pgb_connection_info{database}}) { print $fh "$d - ", &comma_numbers($pgb_connection_info{database}{$d}), "\n"; foreach my $u (sort keys %{$pgb_connection_info{user}}) { next if (!exists $pgb_connection_info{database_user}{$d}{$u}); print $fh "\t$u ", &comma_numbers($pgb_connection_info{database_user}{$d}{$u}), "\n"; } } print $fh "\nDatabase Host Count\n"; foreach my $d (sort keys %{$pgb_connection_info{database}}) { print $fh "$d - ", &comma_numbers($pgb_connection_info{database}{$d}), "\n"; foreach my $u (sort keys %{$pgb_connection_info{host}}) { next if (!exists $pgb_connection_info{database_host}{$d}{$u}); print $fh "\t$u ", &comma_numbers($pgb_connection_info{database_host}{$d}{$u}), "\n"; } } } # Show pgbouncer connection per user statistics if (exists $pgb_connection_info{user}) { print $fh "\n- pgBouncer connections per user ---------------------------------------------\n\n"; print $fh "User Count\n"; foreach my $d (sort keys %{$pgb_connection_info{user}}) { print $fh "$d - ", &comma_numbers($pgb_connection_info{user}{$d}), "\n"; } } # Show pgbouncer connection per host statistics if (exists $pgb_connection_info{host}) { print $fh "\n- pgBouncer connections per host --------------------------------------------\n\n"; print $fh "Host Count\n"; foreach my $d (sort keys %{$pgb_connection_info{host}}) { print $fh "$d - ", &comma_numbers($pgb_connection_info{host}{$d}), "\n"; } } if (!$disable_error) { &show_pgb_error_as_text(); } print $fh "\n\n"; print $fh "Report generated by pgBadger $VERSION ($project_url).\n"; } sub dump_error_as_text { my $curdb = shift; # Global information my $curdate = localtime(time); my $fmt_nlines = &comma_numbers($overall_stat{nlines}{$curdb}); my $total_time = timestr($td); $total_time =~ s/^([\.0-9]+) wallclock.*/$1/; $total_time = &convert_time($total_time * 1000); my $logfile_str = $log_files[0]; if ($#log_files > 0) { $logfile_str .= ', ..., ' . $log_files[-1]; } $report_title ||= 'PostgreSQL Log Analyzer'; # Set logs limits my ($t_log_min, $t_log_max) = get_log_limit($curdb); print $fh qq{ pgBadger :: $report_title - Global information --------------------------------------------------- Generated on $curdate Log file: $logfile_str Parsed $fmt_nlines log entries in $total_time Log start from $t_log_min to $t_log_max }; &show_error_as_text($curdb); print $fh "\n\n"; &show_pgb_error_as_text(); print $fh "\n\n"; print $fh "Report generated by pgBadger $VERSION ($project_url).\n"; } # We change temporary log level from LOG to ERROR # to store these messages into the error report sub change_log_level { my $msg = shift; return 1 if ($msg =~ /parameter "[^"]+" changed to "[^"]+"/); return 1 if ($msg =~ /database system was/); return 1 if ($msg =~ /recovery has paused/); return 1 if ($msg =~ /ending cancel to blocking autovacuum/); return 1 if ($msg =~ /skipping analyze of/); return 1 if ($msg =~ /using stale statistics/); return 1 if ($msg =~ /replication command:/); return 1 if ($msg =~ /still waiting for/); return 0; } sub revert_log_level { my $msg = shift; return ($msg, 1) if ($msg =~ s/ERROR: (parameter "[^"]+" changed to)/LOG: $1/); return ($msg, 1) if ($msg =~ s/ERROR: (database system was)/LOG: $1/); return ($msg, 1) if ($msg =~ s/ERROR: (recovery has paused)/LOG: $1/); return ($msg, 1) if ($msg =~ s/ERROR: (sending cancel to blocking autovacuum)/LOG: $1/); return ($msg, 1) if ($msg =~ s/ERROR: (skipping analyze of)/LOG: $1/); return ($msg, 1) if ($msg =~ s/ERROR: (using stale statistics)/LOG: $1/); return ($msg, 1) if ($msg =~ s/ERROR: (received replication command:)/LOG: $1/); return ($msg, 1) if ($msg =~ s/ERROR: (.*still waiting for)/LOG: $1/); return ($msg, 0); } sub show_error_as_text { my $curdb = shift; return if (scalar keys %error_info == 0); print $fh "\n- Most frequent events (N) ---------------------------------------------\n\n"; my $idx = 1; foreach my $k (sort {$error_info{$curdb}{$b}{count} <=> $error_info{$curdb}{$a}{count}} keys %{$error_info{$curdb}}) { next if (!$error_info{$curdb}{$k}{count}); last if ($idx > $top); last if (!$error_info{$curdb}{$k}{count}); my ($msg, $ret) = &revert_log_level($k); if ($error_info{$curdb}{$k}{count} > 1) { print $fh "$idx) " . &comma_numbers($error_info{$curdb}{$k}{count}) . " - $msg\n"; print $fh "--\n"; my $j = 1; for (my $i = 0 ; $i <= $#{$error_info{$curdb}{$k}{date}} ; $i++) { last if ($i == $sample); ($error_info{$curdb}{$k}{error}[$i], $ret) = &revert_log_level($error_info{$k}{error}[$i]); if ($msg && ($logs_type{$curdb}{ERROR} > 0)) { $logs_type{$curdb}{ERROR}--; $logs_type{$curdb}{LOG}++; } print $fh "\t- Example $j: $error_info{$curdb}{$k}{date}[$i] - $error_info{$k}{error}[$i]\n"; print $fh "\t\tDetail: $error_info{$curdb}{$k}{detail}[$i]\n" if ($error_info{$k}{detail}[$i]); print $fh "\t\tContext: $error_info{$curdb}{$k}{context}[$i]\n" if ($error_info{$k}{context}[$i]); print $fh "\t\tHint: $error_info{$curdb}{$k}{hint}[$i]\n" if ($error_info{$k}{hint}[$i]); print $fh "\t\tStatement: ", &anonymize_query($error_info{$curdb}{$k}{statement}[$i]), "\n" if ($error_info{$k}{statement}[$i]); print $fh "\t\tDatabase: $error_info{$curdb}{$k}{db}[$i]\n" if ($error_info{$k}{db}[$i]); $j++; } } elsif ($error_info{$curdb}{$k}{error}[0]) { ($error_info{$curdb}{$k}{error}[0], $ret) = &revert_log_level($error_info{$curdb}{$k}{error}[0]); if ($msg && ($logs_type{$curdb}{ERROR} > 0)) { $logs_type{$curdb}{ERROR}--; $logs_type{$curdb}{LOG}++; } if ($sample) { print $fh "$idx) " . &comma_numbers($error_info{$curdb}{$k}{count}) . " - $error_info{$curdb}{$k}{error}[0]\n"; print $fh "--\n"; print $fh "\t- Date: $error_info{$curdb}{$k}{date}[0]\n"; print $fh "\t\tDetail: $error_info{$curdb}{$k}{detail}[0]\n" if ($error_info{$curdb}{$k}{detail}[0]); print $fh "\t\tContext: $error_info{$curdb}{$k}{context}[0]\n" if ($error_info{$curdb}{$k}{context}[0]); print $fh "\t\tHint: $error_info{$curdb}{$k}{hint}[0]\n" if ($error_info{$curdb}{$k}{hint}[0]); print $fh "\t\tStatement: ", &anonymize_query($error_info{$curdb}{$k}{statement}[0]), "\n" if ($error_info{$curdb}{$k}{statement}[0]); print $fh "\t\tDatabase: $error_info{$curdb}{$k}{db}[0]\n" if ($error_info{$curdb}{$k}{db}[0]); } else { print $fh "$idx) " . &comma_numbers($error_info{$curdb}{$k}{count}) . " - $msg\n"; print $fh "--\n"; } } $idx++; } if (scalar keys %{$logs_type{$curdb}} > 0) { print $fh "\n- Logs per type ---------------------------------------------\n\n"; my $total_logs = 0; foreach my $d (keys %{$logs_type{$curdb}}) { $total_logs += $logs_type{$curdb}{$d}; } print $fh "Logs type Count Percentage\n"; foreach my $d (sort keys %{$logs_type{$curdb}}) { next if (!$logs_type{$curdb}{$d}); print $fh "$d\t\t", &comma_numbers($logs_type{$curdb}{$d}), "\t", sprintf("%0.2f", ($logs_type{$curdb}{$d} * 100) / $total_logs), "%\n"; } } if (scalar keys %{$errors_code{$curdb}} > 0) { print $fh "\n- Logs per type ---------------------------------------------\n\n"; my $total_logs = 0; foreach my $d (keys %{$errors_code{$curdb}}) { $total_logs += $errors_code{$curdb}{$d}; } print $fh "Errors class code Count Percentage\n"; foreach my $d (sort keys %{$errors_code{$curdb}}) { next if (!$errors_code{$curdb}{$d}); print $fh "$CLASS_ERROR_CODE{$d}\t$d\t\t", &comma_numbers($errors_code{$curdb}{$d}), "\t", sprintf("%0.2f", ($errors_code{$curdb}{$d} * 100) / $total_logs), "%\n"; } } } sub show_pgb_error_as_text { return if (scalar keys %pgb_error_info == 0); print $fh "\n- Most frequent events (N) ---------------------------------------------\n\n"; my $idx = 1; foreach my $k (sort {$pgb_error_info{$b}{count} <=> $pgb_error_info{$a}{count}} keys %pgb_error_info) { next if (!$pgb_error_info{$k}{count}); last if ($idx > $top); my $msg = $k; if ($pgb_error_info{$k}{count} > 1) { print $fh "$idx) " . &comma_numbers($pgb_error_info{$k}{count}) . " - $msg\n"; print $fh "--\n"; my $j = 1; for (my $i = 0 ; $i <= $#{$pgb_error_info{$k}{date}} ; $i++) { last if ($i == $sample); print $fh "\t- Example $j: $pgb_error_info{$k}{date}[$i] - $pgb_error_info{$k}{error}[$i]\n"; print $fh "\t\tDatabase: $pgb_error_info{$k}{db}[$i]\n" if ($pgb_error_info{$k}{db}[$i]); print $fh "\t\tUser: $pgb_error_info{$k}{user}[$i]\n" if ($pgb_error_info{$k}{user}[$i]); print $fh "\t\tClient: $pgb_error_info{$k}{remote}[$i]\n" if ($pgb_error_info{$k}{remote}[$i]); $j++; } } else { if ($sample) { print $fh "$idx) " . &comma_numbers($pgb_error_info{$k}{count}) . " - $pgb_error_info{$k}{error}[0]\n"; print $fh "--\n"; print $fh "\t- Date: $pgb_error_info{$k}{date}[0]\n"; print $fh "\t\tDatabase: $pgb_error_info{$k}{db}[0]\n" if ($pgb_error_info{$k}{db}[0]); print $fh "\t\tUser: $pgb_error_info{$k}{user}[0]\n" if ($pgb_error_info{$k}{user}[0]); print $fh "\t\tClient: $pgb_error_info{$k}{remote}[0]\n" if ($pgb_error_info{$k}{remote}[0]); } else { print $fh "$idx) " . &comma_numbers($pgb_error_info{$k}{count}) . " - $msg\n"; print $fh "--\n"; } } $idx++; } } sub html_header { my $uri = shift; my $curdb = shift; my $date = localtime(time); my $global_info = &print_global_information($curdb); my @tmpjscode = @jscode; my $path_prefix = ''; $path_prefix = '../' if ($report_per_database); for (my $i = 0; $i <= $#tmpjscode; $i++) { $tmpjscode[$i] =~ s/EDIT_URI/$path_prefix$uri/; } my $local_title = 'PostgreSQL Log Analyzer'; if ($report_title) { $local_title = $report_title; } $report_title ||= 'pgBadger'; print $fh qq{ pgBadger :: $local_title @tmpjscode
}; } # Create global information section sub print_global_information { my $curdb = shift(); my $curdate = localtime(time); my $fmt_nlines = &comma_numbers($overall_stat{nlines}{$curdb}); my $t3 = Benchmark->new; my $td = timediff($t3, $t0); my $total_time = timestr($td); $total_time =~ s/^([\.0-9]+) wallclock.*/$1/; $total_time = &convert_time($total_time * 1000); my $logfile_str = $log_files[0]; if ($#log_files > 0) { $logfile_str .= ', ..., ' . $log_files[-1]; } # Set logs limits my ($t_log_min, $t_log_max) = get_log_limit($curdb); return qq{ }; } sub print_overall_statistics { my $curdb = shift(); my $fmt_unique = &comma_numbers(scalar keys %{$normalyzed_info{$curdb}}); my $fmt_queries = &comma_numbers($overall_stat{$curdb}{'queries_number'}); my $avg_queries = &comma_numbers(int($overall_stat{$curdb}{'queries_number'}/($session_info{$curdb}{count} || 1))); my $q_duration = ($overall_stat{$curdb}{'queries_duration'}{'execute'}||0)+($overall_stat{$curdb}{'queries_duration'}{'prepare'}||0)+($overall_stat{$curdb}{'queries_duration'}{'bind'}||0); my $fmt_duration_prepare = &convert_time($overall_stat{$curdb}{'queries_duration'}{'prepare'}||0); my $fmt_duration_bind = &convert_time($overall_stat{$curdb}{'queries_duration'}{'bind'}||0); my $fmt_duration_execute = &convert_time($overall_stat{$curdb}{'queries_duration'}{'execute'}||0); my $fmt_duration = &convert_time($q_duration); $overall_stat{$curdb}{'first_query_ts'} ||= '-'; $overall_stat{$curdb}{'last_query_ts'} ||= '-'; my $query_peak = 0; my $query_peak_date = ''; foreach (sort {$overall_stat{$curdb}{'peak'}{$b}{query} <=> $overall_stat{$curdb}{'peak'}{$a}{query}} keys %{$overall_stat{$curdb}{'peak'}}) { $query_peak = &comma_numbers($overall_stat{$curdb}{'peak'}{$_}{query}); $query_peak_date = $_ if ($query_peak); last; } my $avg_duration = &convert_time(int($q_duration/($session_info{$curdb}{count} || 1))); my $fmt_errors = &comma_numbers($overall_stat{$curdb}{'errors_number'}); my $fmt_unique_error = &comma_numbers(scalar keys %{$error_info{$curdb}}); my $autovacuum_count = &comma_numbers($autovacuum_info{$curdb}{count}); my $autoanalyze_count = &comma_numbers($autoanalyze_info{$curdb}{count}); my $tempfile_count = &comma_numbers($tempfile_info{$curdb}{count}); my $cancelled_count = &comma_numbers($cancelled_info{$curdb}{count}); my $fmt_temp_maxsise = &pretty_print_size($tempfile_info{$curdb}{maxsize}); my $fmt_temp_avsize = &pretty_print_size(sprintf("%.2f", $tempfile_info{$curdb}{size} / ($tempfile_info{$curdb}{count} || 1))); my $session_count = &comma_numbers($session_info{$curdb}{count}); my $avg_session_duration = &convert_time($session_info{$curdb}{duration} / ($session_info{$curdb}{count} || 1)); my $tot_session_duration = &convert_time($session_info{$curdb}{duration}); my $connection_count = &comma_numbers($connection_info{$curdb}{count}); my $avg_idle_time = &convert_time( ($session_info{$curdb}{duration} - $q_duration) / ($session_info{$curdb}{count} || 1)); $avg_idle_time = 'n/a' if (!$session_info{$curdb}{count}); my $connection_peak = 0; my $connection_peak_date = ''; my $session_peak = 0; my $session_peak_date = ''; foreach (sort {$overall_stat{$curdb}{'peak'}{$b}{connection} <=> $overall_stat{$curdb}{'peak'}{$a}{connection}} keys %{$overall_stat{$curdb}{'peak'}}) { $connection_peak = &comma_numbers($overall_stat{$curdb}{'peak'}{$_}{connection}); $connection_peak_date = $_ if ($connection_peak); last; } foreach (sort {$overall_stat{$curdb}{'peak'}{$b}{session} <=> $overall_stat{$curdb}{'peak'}{$a}{session}} keys %{$overall_stat{$curdb}{'peak'}}) { next if (!$session_count); $session_peak = &comma_numbers($overall_stat{$curdb}{'peak'}{$_}{session}); $session_peak_date = $_ if ($session_peak); last; } my $main_error = 0; my $total = 0; foreach my $k (sort {$error_info{$curdb}{$b}{count} <=> $error_info{$curdb}{$a}{count}} keys %{$error_info{$curdb}}) { next if (!$error_info{$curdb}{$k}{count}); $main_error = &comma_numbers($error_info{$curdb}{$k}{count}) if (!$main_error); $total += $error_info{$curdb}{$k}{count}; } $total = &comma_numbers($total); my $db_count = scalar keys %{$database_info{$curdb}}; print $fh qq{

Overview

Global Stats

  • $fmt_unique Number of unique normalized queries
  • $fmt_queries Number of queries
  • $fmt_duration Total query duration
  • $overall_stat{$curdb}{'first_query_ts'} First query
  • $overall_stat{$curdb}{'last_query_ts'} Last query
  • $query_peak queries/s at $query_peak_date Query peak
  • $fmt_duration Total query duration
  • $fmt_duration_prepare Prepare/parse total duration
  • $fmt_duration_bind Bind total duration
  • $fmt_duration_execute Execute total duration
  • $fmt_errors Number of events
  • $fmt_unique_error Number of unique normalized events
  • $main_error Max number of times the same event was reported
  • $cancelled_count Number of cancellation
  • $autovacuum_count Total number of automatic vacuums
  • $autoanalyze_count Total number of automatic analyzes
  • $tempfile_count Number temporary file
  • $fmt_temp_maxsise Max size of temporary file
  • $fmt_temp_avsize Average size of temporary file
  • $session_count Total number of sessions
  • $session_peak sessions at $session_peak_date Session peak
  • $tot_session_duration Total duration of sessions
  • $avg_session_duration Average duration of sessions
  • $avg_queries Average queries per session
  • $avg_duration Average queries duration per session
  • $avg_idle_time Average idle time per session
  • $connection_count Total number of connections
  • }; if ($connection_count) { print $fh qq{
  • $connection_peak connections/s at $connection_peak_date Connection peak
  • }; } print $fh qq{
  • $db_count Total number of databases
}; } sub print_general_activity { my $curdb = shift; my $queries = ''; my $select_queries = ''; my $write_queries = ''; my $prepared_queries = ''; my $connections = ''; my $sessions = ''; foreach my $d (sort {$a <=> $b} keys %{$per_minute_info{$curdb}}) { my $c = 1; $d =~ /^\d{4}(\d{2})(\d{2})$/; my $zday = "$abbr_month{$1} $2"; foreach my $h (sort {$a <=> $b} keys %{$per_minute_info{$curdb}{$d}}) { my %cur_period_info = (); my $read_average_duration = 0; my $read_average_count = 0; my $write_average_duration = 0; my $write_average_count = 0; my %all_query_duration=(); foreach my $m (keys %{$per_minute_info{$curdb}{$d}{$h}}) { $cur_period_info{$curdb}{count} += ($per_minute_info{$curdb}{$d}{$h}{$m}{query}{count} || 0); $cur_period_info{$curdb}{duration} += ($per_minute_info{$curdb}{$d}{$h}{$m}{query}{duration} || 0); $cur_period_info{$curdb}{min} = $per_minute_info{$curdb}{$d}{$h}{$m}{query}{min} if (!exists $cur_period_info{$curdb}{min} || ($per_minute_info{$curdb}{$d}{$h}{$m}{query}{min} < $cur_period_info{$curdb}{min})); $cur_period_info{$curdb}{max} = $per_minute_info{$curdb}{$d}{$h}{$m}{query}{max} if (!exists $cur_period_info{$curdb}{max} || ($per_minute_info{$curdb}{$d}{$h}{$m}{query}{max} > $cur_period_info{$curdb}{max})); push(@{$all_query_duration{'query'}}, $per_minute_info{$curdb}{$d}{$h}{$m}{query}{duration}||0); foreach my $a (@SQL_ACTION) { $cur_period_info{$curdb}{$a}{count} += ($per_minute_info{$curdb}{$d}{$h}{$m}{lc($a)}{count} || 0); $cur_period_info{$curdb}{$a}{duration} += ($per_minute_info{$curdb}{$d}{$h}{$m}{lc($a)}{duration} || 0); push(@{$all_query_duration{$a}}, $per_minute_info{$curdb}{$d}{$h}{$m}{lc($a)}{duration}||0); $cur_period_info{$curdb}{usual} += ($per_minute_info{$curdb}{$d}{$h}{$m}{lc($a)}{count} || 0); } $cur_period_info{$curdb}{prepare} += ($per_minute_info{$curdb}{$d}{$h}{$m}{prepare} || 0); $cur_period_info{$curdb}{execute} += ($per_minute_info{$curdb}{$d}{$h}{$m}{execute} || 0); } $cur_period_info{$curdb}{average} = $cur_period_info{$curdb}{duration} / ($cur_period_info{$curdb}{count} || 1); $read_average_duration = ($cur_period_info{$curdb}{'SELECT'}{duration} + $cur_period_info{$curdb}{'COPY TO'}{duration}); $read_average_count = ($cur_period_info{$curdb}{'SELECT'}{count} + $cur_period_info{$curdb}{'COPY TO'}{count}); $cur_period_info{$curdb}{'SELECT'}{average} = $cur_period_info{$curdb}{'SELECT'}{duration} / ($cur_period_info{$curdb}{'SELECT'}{count} || 1); $write_average_duration = ($cur_period_info{$curdb}{'INSERT'}{duration} + $cur_period_info{$curdb}{'UPDATE'}{duration} + $cur_period_info{$curdb}{'DELETE'}{duration} + $cur_period_info{$curdb}{'COPY FROM'}{duration}); $write_average_count = ($cur_period_info{$curdb}{'INSERT'}{count} + $cur_period_info{$curdb}{'UPDATE'}{count} + $cur_period_info{$curdb}{'DELETE'}{count} + $cur_period_info{$curdb}{'COPY FROM'}{count}); $zday = " " if ($c > 1); $c++; my $count = &comma_numbers($cur_period_info{$curdb}{count}); my $min = &convert_time($cur_period_info{$curdb}{min}); my $max = &convert_time($cur_period_info{$curdb}{max}); my $average = &convert_time($cur_period_info{$curdb}{average}); my %percentile = (); foreach my $lp (@LATENCY_PERCENTILE) { $cur_period_info{$curdb}{$lp}{percentileindex} = int(@{$all_query_duration{'query'}} * $lp / 100) ; @{$all_query_duration{'query'}}= sort{ $a <=> $b } @{$all_query_duration{'query'}}; $cur_period_info{$curdb}{$lp}{percentile} = $all_query_duration{'query'}[$cur_period_info{$curdb}{$lp}{percentileindex}]; $percentile{$lp} = &convert_time($cur_period_info{$curdb}{$lp}{percentile}); @{$all_query_duration{'READ'}}= sort{ $a <=> $b } (@{$all_query_duration{'SELECT'}}, @{$all_query_duration{'COPY TO'}}); $cur_period_info{$curdb}{'READ'}{$lp}{percentileindex} = int(@{$all_query_duration{'READ'}} * $lp / 100) ; $cur_period_info{$curdb}{'READ'}{$lp}{percentile} = $all_query_duration{'READ'}[$cur_period_info{$curdb}{'READ'}{$lp}{percentileindex}]; $percentile{'READ'}{$lp} = &convert_time($cur_period_info{$curdb}{'READ'}{$lp}{percentile}); @{$all_query_duration{'WRITE'}}= sort{ $a <=> $b } (@{$all_query_duration{'INSERT'}},@{$all_query_duration{'UPDATE'}},@{$all_query_duration{'DELETE'}},@{$all_query_duration{'COPY FROM'}}); $cur_period_info{$curdb}{'WRITE'}{$lp}{percentileindex} = int(@{$all_query_duration{'WRITE'}} * $lp / 100) ; $cur_period_info{$curdb}{'WRITE'}{$lp}{percentile} = $all_query_duration{'WRITE'}[$cur_period_info{$curdb}{'WRITE'}{$lp}{percentileindex}]; $percentile{'WRITE'}{$lp} = &convert_time($cur_period_info{$curdb}{'WRITE'}{$lp}{percentile}); } $queries .= qq{ $zday $h $count $min $max $average }; foreach my $lp (@LATENCY_PERCENTILE) { $queries .= "$percentile{$lp}\n"; } $queries .= qq{ }; $count = &comma_numbers($cur_period_info{$curdb}{'SELECT'}{count}); my $copyto_count = &comma_numbers($cur_period_info{$curdb}{'COPY TO'}{count}); $average = &convert_time($read_average_duration / ($read_average_count || 1)); $select_queries .= qq{ $zday $h $count $copyto_count $average }; foreach my $lp (@LATENCY_PERCENTILE) { $select_queries .= "$percentile{'READ'}{$lp}\n"; } $select_queries .= qq{ }; my $insert_count = &comma_numbers($cur_period_info{$curdb}{'INSERT'}{count}); my $update_count = &comma_numbers($cur_period_info{$curdb}{'UPDATE'}{count}); my $delete_count = &comma_numbers($cur_period_info{$curdb}{'DELETE'}{count}); my $copyfrom_count = &comma_numbers($cur_period_info{$curdb}{'COPY FROM'}{count}); my $write_average = &convert_time($write_average_duration / ($write_average_count || 1)); $write_queries .= qq{ $zday $h $insert_count $update_count $delete_count $copyfrom_count $write_average} ; foreach my $lp (@LATENCY_PERCENTILE) { $write_queries .= "$percentile{'WRITE'}{$lp}\n"; } $write_queries .= qq{ }; my $prepare_count = &comma_numbers($cur_period_info{$curdb}{prepare}); my $execute_count = &comma_numbers($cur_period_info{$curdb}{execute}); my $bind_prepare = &comma_numbers(sprintf("%.2f", $cur_period_info{$curdb}{execute}/($cur_period_info{$curdb}{prepare}||1))); my $prepare_usual = &comma_numbers(sprintf("%.2f", ($cur_period_info{$curdb}{prepare}/($cur_period_info{$curdb}{usual}||1)) * 100)) . "%"; $prepared_queries .= qq{ $zday $h $prepare_count $execute_count $bind_prepare $prepare_usual }; $count = &comma_numbers($connection_info{$curdb}{chronos}{"$d"}{"$h"}{count}); $average = &comma_numbers(sprintf("%0.2f", $connection_info{$curdb}{chronos}{"$d"}{"$h"}{count} / 3600)); $connections .= qq{ $zday $h $count $average/s }; $count = &comma_numbers($session_info{$curdb}{chronos}{"$d"}{"$h"}{count}); $cur_period_info{$curdb}{'session'}{average} = $session_info{$curdb}{chronos}{"$d"}{"$h"}{duration} / ($session_info{$curdb}{chronos}{"$d"}{"$h"}{count} || 1); $average = &convert_time($cur_period_info{$curdb}{'session'}{average}); my $avg_idle = &convert_time(($session_info{$curdb}{chronos}{"$d"}{"$h"}{duration} - $cur_period_info{$curdb}{duration}) / ($session_info{$curdb}{chronos}{"$d"}{"$h"}{count} || 1)); $sessions .= qq{ $zday $h $count $average $avg_idle }; } } # Set default values $queries = qq{$NODATA} if (!$queries); $select_queries = qq{$NODATA} if (!$select_queries); $write_queries = qq{$NODATA} if (!$write_queries); $prepared_queries = qq{$NODATA} if (!$prepared_queries); $connections = qq{$NODATA} if (!$connections); $sessions = qq{$NODATA} if (!$sessions); print $fh qq{

General Activity

$queries
Day Hour Count Min duration Max duration Avg duration Latency Percentile(90) Latency Percentile(95) Latency Percentile(99)
$select_queries
Day Hour SELECT COPY TO Average Duration Latency Percentile(90) Latency Percentile(95) Latency Percentile(99)
$write_queries
Day Hour INSERT UPDATE DELETE COPY FROM Average Duration Latency Percentile(90) Latency Percentile(95) Latency Percentile(99)
$prepared_queries
Day Hour Prepare Bind Bind/Prepare Percentage of prepare
$connections
Day Hour Count Average / Second
$sessions
Day Hour Count Average Duration Average idle time
Back to the top of the General Activity table
}; } sub print_sql_traffic { my $curdb = shift; my $bind_vs_prepared = sprintf("%.2f", $overall_stat{$curdb}{'execute'} / ($overall_stat{$curdb}{'prepare'} || 1)); my $total_usual_queries = 0; map { $total_usual_queries += $overall_stat{$curdb}{lc($_)}; } @SQL_ACTION; my $prepared_vs_normal = sprintf("%.2f", ($overall_stat{$curdb}{'execute'} / ($total_usual_queries || 1))*100); my $query_peak = 0; my $query_peak_date = ''; foreach (sort {$overall_stat{$curdb}{'peak'}{$b}{query} <=> $overall_stat{$curdb}{'peak'}{$a}{query}} keys %{$overall_stat{$curdb}{'peak'}}) { $query_peak = &comma_numbers($overall_stat{$curdb}{'peak'}{$_}{query}); $query_peak_date = $_ if ($query_peak); last; } my $select_peak = 0; my $select_peak_date = ''; foreach (sort {$overall_stat{$curdb}{'peak'}{$b}{select} <=> $overall_stat{$curdb}{'peak'}{$a}{select}} keys %{$overall_stat{$curdb}{'peak'}}) { $select_peak = &comma_numbers($overall_stat{$curdb}{'peak'}{$_}{select}); $select_peak_date = $_ if ($select_peak); last; } my $write_peak = 0; my $write_peak_date = ''; foreach (sort {$overall_stat{$curdb}{'peak'}{$b}{write} <=> $overall_stat{$curdb}{'peak'}{$a}{write}} keys %{$overall_stat{$curdb}{'peak'}}) { $write_peak = &comma_numbers($overall_stat{$curdb}{'peak'}{$_}{write}); $write_peak_date = $_ if ($write_peak); last; } my $fmt_duration = &convert_time($overall_stat{$curdb}{'queries_duration'}{'execute'}+($overall_stat{$curdb}{'queries_duration'}{'prepare'}||0)+($overall_stat{$curdb}{'queries_duration'}{'bind'}||0)); print $fh qq{

SQL Traffic

Key values

  • $query_peak queries/s Query Peak
  • $query_peak_date Date
$drawn_graphs{queriespersecond_graph}
}; delete $drawn_graphs{queriespersecond_graph}; print $fh qq{

SELECT Traffic

Key values

  • $select_peak queries/s Query Peak
  • $select_peak_date Date
$drawn_graphs{selectqueries_graph}
}; delete $drawn_graphs{selectqueries_graph}; print $fh qq{

INSERT/UPDATE/DELETE Traffic

Key values

  • $write_peak queries/s Query Peak
  • $write_peak_date Date
$drawn_graphs{writequeries_graph}
}; delete $drawn_graphs{writequeries_graph}; print $fh qq{

Queries duration

Key values

  • $fmt_duration Total query duration
$drawn_graphs{durationqueries_graph}
}; delete $drawn_graphs{durationqueries_graph}; print $fh qq{

Prepared queries ratio

Key values

  • $bind_vs_prepared Ratio of bind vs prepare
  • $prepared_vs_normal % Ratio between prepared and "usual" statements
$drawn_graphs{bindpreparequeries_graph}
}; delete $drawn_graphs{bindpreparequeries_graph}; } sub print_pgbouncer_stats { my $curdb = shift; my $request_peak = 0; my $request_peak_date = ''; foreach (sort {$pgb_overall_stat{'peak'}{$b}{t_req} <=> $pgb_overall_stat{'peak'}{$a}{t_req}} keys %{$pgb_overall_stat{'peak'}}) { $request_peak = &comma_numbers($pgb_overall_stat{'peak'}{$_}{t_req}); $request_peak_date = $_; last; } my $inbytes_peak = 0; my $inbytes_peak_date = ''; foreach (sort {$pgb_overall_stat{'peak'}{$b}{t_inbytes} <=> $pgb_overall_stat{'peak'}{$a}{t_inbytes}} keys %{$pgb_overall_stat{'peak'}}) { $inbytes_peak = &comma_numbers($pgb_overall_stat{'peak'}{$_}{t_inbytes}); $inbytes_peak_date = $_; last; } my $outbytes_peak = 0; my $outbytes_peak_date = ''; foreach (sort {$pgb_overall_stat{'peak'}{$b}{t_outbytes} <=> $pgb_overall_stat{'peak'}{$a}{t_outbytes}} keys %{$pgb_overall_stat{'peak'}}) { $outbytes_peak = &comma_numbers($pgb_overall_stat{'peak'}{$_}{t_outbytes}); $outbytes_peak_date = $_; last; } my $avgduration_peak = 0; my $avgduration_peak_date = ''; foreach (sort {$pgb_overall_stat{'peak'}{$b}{t_avgduration} <=> $pgb_overall_stat{'peak'}{$a}{t_avgduration}} keys %{$pgb_overall_stat{'peak'}}) { $avgduration_peak = &convert_time($pgb_overall_stat{'peak'}{$_}{t_avgduration}); $avgduration_peak_date = $_; last; } print $fh qq{

Request Throughput

Key values

  • $request_peak queries/s Request Peak
  • $request_peak_date Date
$drawn_graphs{pgb_requestpersecond_graph}
}; delete $drawn_graphs{pgb_requestpersecond_graph}; print $fh qq{

Bytes I/O Throughput

Key values

  • $inbytes_peak Bytes/s In Bytes Peak
  • $inbytes_peak_date Date
  • $outbytes_peak Bytes/s Out Bytes Peak
  • $outbytes_peak_date Date
$drawn_graphs{pgb_bytepersecond_graph}
}; delete $drawn_graphs{pgb_bytepersecond_graph}; print $fh qq{

Queries Average duration

Key values

  • $avgduration_peak Average Duration Peak
  • $avgduration_peak_date Date
$drawn_graphs{pgb_avgduration_graph}
}; delete $drawn_graphs{pgb_avgduration_graph}; } sub compute_query_graphs { my $curdb = shift; my %graph_data = (); if ($graph) { foreach my $tm (sort {$a <=> $b} keys %{$per_minute_info{$curdb}}) { $tm =~ /(\d{4})(\d{2})(\d{2})/; my $y = $1 - 1900; my $mo = $2 - 1; my $d = $3; foreach my $h ("00" .. "23") { next if (!exists $per_minute_info{$curdb}{$tm}{$h}); my %q_dataavg = (); my %a_dataavg = (); my %c_dataavg = (); my %s_dataavg = (); my %p_dataavg = (); foreach my $m ("00" .. "59") { next if (!exists $per_minute_info{$curdb}{$tm}{$h}{$m}); my $rd = &average_per_minutes($m, $avg_minutes); if (!exists $p_dataavg{prepare}{"$rd"}) { $p_dataavg{prepare}{"$rd"} = 0; $p_dataavg{execute}{"$rd"} = 0; $q_dataavg{count}{"$rd"} = 0; $q_dataavg{duration}{"$rd"} = 0; $q_dataavg{max}{"$rd"} = 0; $q_dataavg{min}{"$rd"} = 0; if (!$disable_query) { foreach my $action (@SQL_ACTION) { $a_dataavg{$action}{count}{"$rd"} = 0; $a_dataavg{$action}{duration}{"$rd"} = 0; $a_dataavg{$action}{max}{"$rd"} = 0; $a_dataavg{$action}{min}{"$rd"} = 0; } $a_dataavg{write}{count}{"$rd"} = 0; $a_dataavg{write}{duration}{"$rd"} = 0; } $c_dataavg{average}{"$rd"} = 0; $c_dataavg{max}{"$rd"} = 0; $c_dataavg{min}{"$rd"} = 0; $s_dataavg{average}{"$rd"} = 0; $s_dataavg{max}{"$rd"} = 0; $s_dataavg{min}{"$rd"} = 0; } if (exists $per_minute_info{$curdb}{$tm}{$h}{$m}{prepare}) { $p_dataavg{prepare}{"$rd"} += $per_minute_info{$curdb}{$tm}{$h}{$m}{prepare}; } elsif (exists $per_minute_info{$curdb}{$tm}{$h}{$m}{parse}) { $p_dataavg{prepare}{"$rd"} += $per_minute_info{$curdb}{$tm}{$h}{$m}{parse}; } $p_dataavg{execute}{"$rd"} += $per_minute_info{$curdb}{$tm}{$h}{$m}{execute} if (exists $per_minute_info{$curdb}{$tm}{$h}{$m}{execute}); if (exists $per_minute_info{$curdb}{$tm}{$h}{$m}{query}) { # Average per minute $q_dataavg{count}{"$rd"} += $per_minute_info{$curdb}{$tm}{$h}{$m}{query}{count}; if (exists $per_minute_info{$curdb}{$tm}{$h}{$m}{query}{duration}) { $q_dataavg{duration}{"$rd"} += $per_minute_info{$curdb}{$tm}{$h}{$m}{query}{duration}; } # Search minimum and maximum during this minute foreach my $s (keys %{$per_minute_info{$curdb}{$tm}{$h}{$m}{query}{second}}) { $q_dataavg{max}{"$rd"} = $per_minute_info{$curdb}{$tm}{$h}{$m}{query}{second}{$s} if ($per_minute_info{$curdb}{$tm}{$h}{$m}{query}{second}{$s} > $q_dataavg{max}{"$rd"}); $q_dataavg{min}{"$rd"} = $per_minute_info{$curdb}{$tm}{$h}{$m}{query}{second}{$s} if ($per_minute_info{$curdb}{$tm}{$h}{$m}{query}{second}{$s} < $q_dataavg{min}{"$rd"}); } if (!$disable_query) { foreach my $action (@SQL_ACTION) { $a_dataavg{$action}{count}{"$rd"} += ($per_minute_info{$curdb}{$tm}{$h}{$m}{lc($action)}{count} || 0); $a_dataavg{$action}{duration}{"$rd"} += ($per_minute_info{$curdb}{$tm}{$h}{$m}{lc($action)}{duration} || 0); if ( ($action ne 'SELECT') && exists $per_minute_info{$curdb}{$tm}{$h}{$m}{lc($action)}{count}) { $a_dataavg{write}{count}{"$rd"} += ($per_minute_info{$curdb}{$tm}{$h}{$m}{lc($action)}{count} || 0); $a_dataavg{write}{duration}{"$rd"} += ($per_minute_info{$curdb}{$tm}{$h}{$m}{lc($action)}{duration} || 0); } # Search minimum and maximum during this minute foreach my $s (keys %{$per_minute_info{$curdb}{$tm}{$h}{$m}{lc($action)}{second}}) { $a_dataavg{$action}{max}{"$rd"} = $per_minute_info{$curdb}{$tm}{$h}{$m}{lc($action)}{second}{$s} if ($per_minute_info{$curdb}{$tm}{$h}{$m}{lc($action)}{second}{$s} > $a_dataavg{$action}{max}{"$rd"}); $a_dataavg{$action}{min}{"$rd"} = $per_minute_info{$curdb}{$tm}{$h}{$m}{lc($action)}{second}{$s} if ($per_minute_info{$curdb}{$tm}{$h}{$m}{lc($action)}{second}{$s} < $a_dataavg{$action}{min}{"$rd"}); } } } } if (exists $per_minute_info{$curdb}{$tm}{$h}{$m}{connection}) { # Average per minute $c_dataavg{average}{"$rd"} += $per_minute_info{$curdb}{$tm}{$h}{$m}{connection}{count}; # Search minimum and maximum during this minute foreach my $s (keys %{$per_minute_info{$curdb}{$tm}{$h}{$m}{connection}{second}}) { $c_dataavg{max}{"$rd"} = $per_minute_info{$curdb}{$tm}{$h}{$m}{connection}{second}{$s} if ($per_minute_info{$curdb}{$tm}{$h}{$m}{connection}{second}{$s} > $c_dataavg{max}{"$rd"}); $c_dataavg{min}{"$rd"} = $per_minute_info{$curdb}{$tm}{$h}{$m}{connection}{second}{$s} if ($per_minute_info{$curdb}{$tm}{$h}{$m}{connection}{second}{$s} < $c_dataavg{min}{"$rd"}); } delete $per_minute_info{$curdb}{$tm}{$h}{$m}{connection}; } if (exists $per_minute_info{$curdb}{$tm}{$h}{$m}{session}) { # Average per minute $s_dataavg{average}{"$rd"} += $per_minute_info{$curdb}{$tm}{$h}{$m}{session}{count}; # Search minimum and maximum during this minute foreach my $s (keys %{$per_minute_info{$curdb}{$tm}{$h}{$m}{session}{second}}) { $s_dataavg{max}{"$rd"} = $per_minute_info{$curdb}{$tm}{$h}{$m}{session}{second}{$s} if ($per_minute_info{$curdb}{$tm}{$h}{$m}{session}{second}{$s} > $s_dataavg{max}{"$rd"}); $s_dataavg{min}{"$rd"} = $per_minute_info{$curdb}{$tm}{$h}{$m}{session}{second}{$s} if ($per_minute_info{$curdb}{$tm}{$h}{$m}{session}{second}{$s} < $s_dataavg{min}{"$rd"}); } delete $per_minute_info{$curdb}{$tm}{$h}{$m}{session}; } } foreach my $rd (@avgs) { my $t = timegm_nocheck(0, $rd, $h, $d, $mo, $y) * 1000; $t += ($timezone*1000); next if ($t < $t_min); last if ($t > $t_max); if (exists $q_dataavg{count}) { # Average queries per minute $graph_data{query} .= "[$t, " . int(($q_dataavg{count}{"$rd"} || 0) / (60 * $avg_minutes)) . "],"; # Max queries per minute $graph_data{'query-max'} .= "[$t, " . ($q_dataavg{max}{"$rd"} || 0) . "],"; # Min queries per minute $graph_data{'query-min'} .= "[$t, " . ($q_dataavg{min}{"$rd"} || 0) . "],"; # Average duration per minute $graph_data{query4} .= "[$t, " . sprintf("%.3f", ($q_dataavg{duration}{"$rd"} || 0) / ($q_dataavg{count}{"$rd"} || 1)) . "],"; } if (scalar keys %c_dataavg) { # Average connections per minute $graph_data{conn_avg} .= "[$t, " . int(($c_dataavg{average}{"$rd"} || 0) / (60 * $avg_minutes)) . "],"; # Max connections per minute $graph_data{conn_max} .= "[$t, " . ($c_dataavg{max}{"$rd"} || 0) . "],"; # Min connections per minute $graph_data{conn_min} .= "[$t, " . ($c_dataavg{min}{"$rd"} || 0) . "],"; } if (scalar keys %s_dataavg) { # Average connections per minute $graph_data{sess_avg} .= "[$t, " . int(($s_dataavg{average}{"$rd"} || 0) / (60 * $avg_minutes)) . "],"; # Max connections per minute $graph_data{sess_max} .= "[$t, " . ($s_dataavg{max}{"$rd"} || 0) . "],"; # Min connections per minute $graph_data{sess_min} .= "[$t, " . ($s_dataavg{min}{"$rd"} || 0) . "],"; } if (!$disable_query && (scalar keys %a_dataavg > 0)) { foreach my $action (@SQL_ACTION) { next if ($select_only && ($action ne 'SELECT')); # Average queries per minute $graph_data{"$action"} .= "[$t, " . int(($a_dataavg{$action}{count}{"$rd"} || 0) / (60 * $avg_minutes)) . "],"; if ($action eq 'SELECT') { # Max queries per minute $graph_data{"$action-max"} .= "[$t, " . ($a_dataavg{$action}{max}{"$rd"} || 0) . "],"; # Min queries per minute $graph_data{"$action-min"} .= "[$t, " . ($a_dataavg{$action}{min}{"$rd"} || 0) . "],"; # Average query duration $graph_data{"$action-2"} .= "[$t, " . sprintf("%.3f", ($a_dataavg{$action}{duration}{"$rd"} || 0) / ($a_dataavg{$action}{count}{"$rd"} || 1)) . "],"; } else { # Average query duration $graph_data{"write"} .= "[$t, " . sprintf("%.3f", ($a_dataavg{write}{duration}{"$rd"} || 0) / ($a_dataavg{write}{count}{"$rd"} || 1)) . "],"; } } } if (!$disable_query && (scalar keys %p_dataavg> 0)) { $graph_data{prepare} .= "[$t, " . ($p_dataavg{prepare}{"$rd"} || 0) . "],"; $graph_data{execute} .= "[$t, " . ($p_dataavg{execute}{"$rd"} || 0) . "],"; $graph_data{ratio_bind_prepare} .= "[$t, " . sprintf("%.2f", ($p_dataavg{execute}{"$rd"} || 0) / ($p_dataavg{prepare}{"$rd"} || 1)) . "],"; } } } } foreach (keys %graph_data) { $graph_data{$_} =~ s/,$//; } } $drawn_graphs{'queriespersecond_graph'} = &jqplot_linegraph( $graphid++, 'queriespersecond_graph', $graph_data{'query-max'}, $graph_data{query}, $graph_data{'query-min'}, 'Queries per second (' . $avg_minutes . ' minutes average)', 'Queries per second', 'Maximum', 'Average', 'Minimum' ); $drawn_graphs{'connectionspersecond_graph'} = &jqplot_linegraph( $graphid++, 'connectionspersecond_graph', $graph_data{conn_max}, $graph_data{conn_avg}, $graph_data{conn_min}, 'Connections per second (' . $avg_minutes . ' minutes average)', 'Connections per second', 'Maximum', 'Average', 'Minimum' ); $drawn_graphs{'sessionspersecond_graph'} = &jqplot_linegraph( $graphid++, 'sessionspersecond_graph', $graph_data{sess_max}, $graph_data{sess_avg}, $graph_data{sess_min}, 'Number of sessions/second (' . $avg_minutes . ' minutes average)', 'Sessions', 'Maximum', 'Average', 'Minimum' ); $drawn_graphs{'selectqueries_graph'} = &jqplot_linegraph( $graphid++, 'selectqueries_graph', $graph_data{"SELECT-max"}, $graph_data{"SELECT"}, $graph_data{"SELECT-min"}, 'SELECT queries (' . $avg_minutes . ' minutes period)', 'Queries per second', 'Maximum', 'Average', 'Minimum' ); $drawn_graphs{'writequeries_graph'} = &jqplot_linegraph( $graphid++, 'writequeries_graph', $graph_data{"DELETE"}, $graph_data{"INSERT"}, $graph_data{"UPDATE"}, 'Write queries (' . $avg_minutes . ' minutes period)', 'Queries', 'DELETE queries', 'INSERT queries', 'UPDATE queries' ); if (!$select_only) { $drawn_graphs{'durationqueries_graph'} = &jqplot_linegraph( $graphid++, 'durationqueries_graph', $graph_data{query4}, $graph_data{"SELECT-2"}, $graph_data{write}, 'Average queries duration (' . $avg_minutes . ' minutes average)', 'Duration', 'All queries', 'Select queries', 'Write queries' ); } else { $drawn_graphs{'durationqueries_graph'} = &jqplot_linegraph( $graphid++, 'durationqueries_graph', $graph_data{query4}, '', '', 'Average queries duration (' . $avg_minutes . ' minutes average)', 'Duration', 'Select queries' ); } $drawn_graphs{'bindpreparequeries_graph'} = &jqplot_linegraph( $graphid++, 'bindpreparequeries_graph', $graph_data{prepare}, $graph_data{"execute"}, $graph_data{ratio_bind_prepare}, 'Bind versus prepare statements (' . $avg_minutes . ' minutes average)', 'Number of statements', 'Prepare/Parse', 'Execute/Bind', 'Bind vs prepare' ); } sub compute_pgbouncer_graphs { my %graph_data = (); if ($graph) { foreach my $tm (sort {$a <=> $b} keys %pgb_per_minute_info) { $tm =~ /(\d{4})(\d{2})(\d{2})/; my $y = $1 - 1900; my $mo = $2 - 1; my $d = $3; foreach my $h ("00" .. "23") { next if (!exists $pgb_per_minute_info{$tm}{$h}); my %c_dataavg = (); my %s_dataavg = (); foreach my $m ("00" .. "59") { my $t = timegm_nocheck(0, $m, $h, $d, $mo, $y) * 1000; $t += ($timezone*1000); next if ($t < $t_min); last if ($t > $t_max); # pgBouncer stats are generate each minutes, always keep this interval $graph_data{'request'} .= "[$t, " . ($pgb_per_minute_info{$tm}{$h}{$m}{t_req} || 0) . "],"; $graph_data{'inbytes'} .= "[$t, " . ($pgb_per_minute_info{$tm}{$h}{$m}{t_inbytes} || 0) . "],"; $graph_data{'outbytes'} .= "[$t, " . ($pgb_per_minute_info{$tm}{$h}{$m}{t_outbytes} || 0) . "],"; $graph_data{'avgduration'} .= "[$t, " . ($pgb_per_minute_info{$tm}{$h}{$m}{t_avgduration} || 0) . "],"; next if (!exists $pgb_per_minute_info{$tm}{$h}{$m}); my $rd = &average_per_minutes($m, $avg_minutes); if (exists $pgb_per_minute_info{$tm}{$h}{$m}{connection}) { # Average per minute $c_dataavg{average}{"$rd"} += $pgb_per_minute_info{$tm}{$h}{$m}{connection}{count}; # Search minimum and maximum during this minute foreach my $s (keys %{$pgb_per_minute_info{$tm}{$h}{$m}{connection}{second}}) { $c_dataavg{max}{"$rd"} = $pgb_per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} if ($pgb_per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} > $c_dataavg{max}{"$rd"}); $c_dataavg{min}{"$rd"} = $pgb_per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} if ($pgb_per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} < $c_dataavg{min}{"$rd"}); } delete $pgb_per_minute_info{$tm}{$h}{$m}{connection}; } if (exists $pgb_per_minute_info{$tm}{$h}{$m}{session}) { # Average per minute $s_dataavg{average}{"$rd"} += $pgb_per_minute_info{$tm}{$h}{$m}{session}{count}; # Search minimum and maximum during this minute foreach my $s (keys %{$pgb_per_minute_info{$tm}{$h}{$m}{session}{second}}) { $s_dataavg{max}{"$rd"} = $pgb_per_minute_info{$tm}{$h}{$m}{session}{second}{$s} if ($pgb_per_minute_info{$tm}{$h}{$m}{session}{second}{$s} > $s_dataavg{max}{"$rd"}); $s_dataavg{min}{"$rd"} = $pgb_per_minute_info{$tm}{$h}{$m}{session}{second}{$s} if ($pgb_per_minute_info{$tm}{$h}{$m}{session}{second}{$s} < $s_dataavg{min}{"$rd"}); } delete $pgb_per_minute_info{$tm}{$h}{$m}{session}; } } foreach my $rd (@avgs) { my $t = timegm_nocheck(0, $rd, $h, $d, $mo, $y) * 1000; $t += ($timezone*1000); next if ($t < $t_min); last if ($t > $t_max); if (scalar keys %c_dataavg) { # Average connections per minute $graph_data{conn_avg} .= "[$t, " . int(($c_dataavg{average}{"$rd"} || 0) / (60 * $avg_minutes)) . "],"; # Max connections per minute $graph_data{conn_max} .= "[$t, " . ($c_dataavg{max}{"$rd"} || 0) . "],"; # Min connections per minute $graph_data{conn_min} .= "[$t, " . ($c_dataavg{min}{"$rd"} || 0) . "],"; } if (scalar keys %s_dataavg) { # Average connections per minute $graph_data{sess_avg} .= "[$t, " . int(($s_dataavg{average}{"$rd"} || 0) / (60 * $avg_minutes)) . "],"; # Max connections per minute $graph_data{sess_max} .= "[$t, " . ($s_dataavg{max}{"$rd"} || 0) . "],"; # Min connections per minute $graph_data{sess_min} .= "[$t, " . ($s_dataavg{min}{"$rd"} || 0) . "],"; } } } } foreach (keys %graph_data) { $graph_data{$_} =~ s/,$//; } } $drawn_graphs{'pgb_requestpersecond_graph'} = &jqplot_linegraph( $graphid++, 'pgb_requestpersecond_graph', $graph_data{request},'',,'','Request per seconds (1 minute average)', '', 'Request per second'); $drawn_graphs{'pgb_bytepersecond_graph'} = &jqplot_linegraph( $graphid++, 'pgb_bytepersecond_graph', $graph_data{inbytes},$graph_data{'outbytes'},'','Bytes I/O per seconds (1 minute average)', 'size', 'In b/s', 'Out b/s'); $drawn_graphs{'pgb_avgduration_graph'} = &jqplot_linegraph( $graphid++, 'pgb_avgduration_graph', $graph_data{avgduration},'','', 'Average query duration (1 minute average)', 'duration', 'Duration'); $drawn_graphs{'pgb_connectionspersecond_graph'} = &jqplot_linegraph( $graphid++, 'pgb_connectionspersecond_graph', $graph_data{conn_max}, $graph_data{conn_avg}, $graph_data{conn_min}, 'Connections per second (' . $avg_minutes . ' minutes average)', 'Connections per second', 'Maximum', 'Average', 'Minimum' ); $drawn_graphs{'pgb_sessionspersecond_graph'} = &jqplot_linegraph( $graphid++, 'pgb_sessionspersecond_graph', $graph_data{sess_max}, $graph_data{sess_avg}, $graph_data{sess_min}, 'Number of sessions/second (' . $avg_minutes . ' minutes average)', 'Sessions', 'Maximum', 'Average', 'Minimum' ); } sub print_established_connection { my $curdb = shift; my $connection_peak = 0; my $connection_peak_date = ''; foreach (sort {$overall_stat{$curdb}{'peak'}{$b}{connection} <=> $overall_stat{$curdb}{'peak'}{$a}{connection}} keys %{$overall_stat{$curdb}{'peak'}}) { $connection_peak = &comma_numbers($overall_stat{$curdb}{'peak'}{$_}{connection}); $connection_peak_date = $_ if ($connection_peak); last; } print $fh qq{

Established Connections

Key values

  • $connection_peak connections Connection Peak
  • $connection_peak_date Date
$drawn_graphs{connectionspersecond_graph}
}; delete $drawn_graphs{connectionspersecond_graph}; } sub print_established_pgb_connection { my $connection_peak = 0; my $connection_peak_date = ''; foreach (sort {$pgb_overall_stat{'peak'}{$b}{connection} <=> $pgb_overall_stat{'peak'}{$a}{connection}} keys %{$pgb_overall_stat{'peak'}}) { $connection_peak = &comma_numbers($pgb_overall_stat{'peak'}{$_}{connection}); $connection_peak_date = $_; last; } print $fh qq{

Established Connections

Key values

  • $connection_peak connections Connection Peak
  • $connection_peak_date Date
$drawn_graphs{pgb_connectionspersecond_graph}
}; delete $drawn_graphs{pgb_connectionspersecond_graph}; } sub print_user_connection { my $curdb = shift; my %infos = (); my $total_count = 0; my $c = 0; my $conn_user_info = ''; my @main_user = ('unknown',0); foreach my $u (sort keys %{$connection_info{$curdb}{user}}) { $conn_user_info .= "$u" . &comma_numbers($connection_info{$curdb}{user}{$u}) . ""; $total_count += $connection_info{$curdb}{user}{$u}; if ($main_user[1] < $connection_info{$curdb}{user}{$u}) { $main_user[0] = $u; $main_user[1] = $connection_info{$curdb}{user}{$u}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$connection_info{$curdb}{user}}) { if ((($connection_info{$curdb}{user}{$d} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $connection_info{$curdb}{user}{$d} || 0; } else { $infos{"Sum connections < $pie_percentage_limit%"} += $connection_info{$curdb}{user}{$d} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; delete $infos{"Sum connections < $pie_percentage_limit%"}; } } $drawn_graphs{userconnections_graph} = &jqplot_piegraph($graphid++, 'graph_userconnections', 'Connections per user', %infos); $total_count = &comma_numbers($total_count); print $fh qq{

Connections per user

Key values

  • $main_user[0] Main User
  • $total_count connections Total
$drawn_graphs{userconnections_graph}
$conn_user_info
User Count
}; delete $drawn_graphs{userconnections_graph}; } sub print_user_pgb_connection { my %infos = (); my $total_count = 0; my $c = 0; my $conn_user_info = ''; my @main_user = ('unknown',0); foreach my $u (sort keys %{$pgb_connection_info{user}}) { $conn_user_info .= "$u" . &comma_numbers($pgb_connection_info{user}{$u}) . ""; $total_count += $pgb_connection_info{user}{$u}; if ($main_user[1] < $pgb_connection_info{user}{$u}) { $main_user[0] = $u; $main_user[1] = $pgb_connection_info{user}{$u}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$pgb_connection_info{user}}) { if ((($pgb_connection_info{user}{$d} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $pgb_connection_info{user}{$d} || 0; } else { $infos{"Sum connections < $pie_percentage_limit%"} += $pgb_connection_info{user}{$d} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; delete $infos{"Sum connections < $pie_percentage_limit%"}; } } $drawn_graphs{pgb_userconnections_graph} = &jqplot_piegraph($graphid++, 'graph_pgb_userconnections', 'Connections per user', %infos); $total_count = &comma_numbers($total_count); print $fh qq{

Connections per user

Key values

  • $main_user[0] Main User
  • $total_count connections Total
$drawn_graphs{pgb_userconnections_graph}
$conn_user_info
User Count
}; delete $drawn_graphs{pgb_userconnections_graph}; } sub print_host_connection { my $curdb = shift(); my %infos = (); my $total_count = 0; my $c = 0; my $conn_host_info = ''; my @main_host = ('unknown',0); foreach my $h (sort keys %{$connection_info{$curdb}{host}}) { $conn_host_info .= "$h" . &comma_numbers($connection_info{$curdb}{host}{$h}) . ""; $total_count += $connection_info{$curdb}{host}{$h}; if ($main_host[1] < $connection_info{$curdb}{host}{$h}) { $main_host[0] = $h; $main_host[1] = $connection_info{$curdb}{host}{$h}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$connection_info{$curdb}{host}}) { if ((($connection_info{$curdb}{host}{$d} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $connection_info{$curdb}{host}{$d} || 0; } else { $infos{"Sum connections < $pie_percentage_limit%"} += $connection_info{$curdb}{host}{$d} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; delete $infos{"Sum connections < $pie_percentage_limit%"}; } } $drawn_graphs{hostconnections_graph} = &jqplot_piegraph($graphid++, 'graph_hostconnections', 'Connections per host', %infos); $total_count = &comma_numbers($total_count); print $fh qq{

Connections per host

Key values

  • $main_host[0] Main host with $main_host[1] connections
  • $total_count Total connections
$drawn_graphs{hostconnections_graph}
$conn_host_info
Host Count
}; delete $drawn_graphs{hostconnections_graph}; } sub print_host_pgb_connection { my %infos = (); my $total_count = 0; my $c = 0; my $conn_host_info = ''; my @main_host = ('unknown',0); foreach my $h (sort keys %{$pgb_connection_info{host}}) { $conn_host_info .= "$h" . &comma_numbers($pgb_connection_info{host}{$h}) . ""; $total_count += $pgb_connection_info{host}{$h}; if ($main_host[1] < $pgb_connection_info{host}{$h}) { $main_host[0] = $h; $main_host[1] = $pgb_connection_info{host}{$h}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$pgb_connection_info{host}}) { if ((($pgb_connection_info{host}{$d} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $pgb_connection_info{host}{$d} || 0; } else { $infos{"Sum connections < $pie_percentage_limit%"} += $pgb_connection_info{host}{$d} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; delete $infos{"Sum connections < $pie_percentage_limit%"}; } } $drawn_graphs{pgb_hostconnections_graph} = &jqplot_piegraph($graphid++, 'graph_pgb_hostconnections', 'Connections per host', %infos); $total_count = &comma_numbers($total_count); print $fh qq{

Connections per host

Key values

  • $main_host[0] Main host with $main_host[1] connections
  • $total_count Total connections
$drawn_graphs{pgb_hostconnections_graph}
$conn_host_info
Host Count
}; delete $drawn_graphs{pgb_hostconnections_graph}; } sub print_database_connection { my $curdb = shift; my %infos = (); my $total_count = 0; my $conn_database_info = ''; my @main_database = ('unknown',0); foreach my $d (sort keys %{$connection_info{$curdb}{database}}) { $conn_database_info .= "$d " . &comma_numbers($connection_info{$curdb}{database}{$d}) . ""; $total_count += $connection_info{$curdb}{database}{$d}; if ($main_database[1] < $connection_info{$curdb}{database}{$d}) { $main_database[0] = $d; $main_database[1] = $connection_info{$curdb}{database}{$d}; } foreach my $u (sort keys %{$connection_info{$curdb}{user}}) { next if (!exists $connection_info{$curdb}{database_user}{$d}{$u}); $conn_database_info .= " $u" . &comma_numbers($connection_info{$curdb}{database_user}{$d}{$u}) . ""; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$connection_info{$curdb}{database}}) { if ((($connection_info{$curdb}{database}{$d} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $connection_info{$curdb}{database}{$d} || 0; } else { $infos{"Sum connections < $pie_percentage_limit%"} += $connection_info{$curdb}{database}{$d} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; delete $infos{"Sum connections < $pie_percentage_limit%"}; } } $drawn_graphs{databaseconnections_graph} = &jqplot_piegraph($graphid++, 'graph_databaseconnections', 'Connections per database', %infos); $total_count = &comma_numbers($total_count); print $fh qq{

Connections per database

Key values

  • $main_database[0] Main Database
  • $total_count connections Total
$drawn_graphs{databaseconnections_graph}
$conn_database_info
Database User Count
}; delete $drawn_graphs{databaseconnections_graph}; } sub print_database_pgb_connection { my %infos = (); my $total_count = 0; my $conn_database_info = ''; my @main_database = ('unknown',0); foreach my $d (sort keys %{$pgb_connection_info{database}}) { $conn_database_info .= "$d " . &comma_numbers($pgb_connection_info{database}{$d}) . ""; $total_count += $pgb_connection_info{database}{$d}; if ($main_database[1] < $pgb_connection_info{database}{$d}) { $main_database[0] = $d; $main_database[1] = $pgb_connection_info{database}{$d}; } foreach my $u (sort keys %{$pgb_connection_info{user}}) { next if (!exists $pgb_connection_info{database_user}{$d}{$u}); $conn_database_info .= " $u" . &comma_numbers($pgb_connection_info{database_user}{$d}{$u}) . ""; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$pgb_connection_info{database}}) { if ((($pgb_connection_info{database}{$d} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $pgb_connection_info{database}{$d} || 0; } else { $infos{"Sum connections < $pie_percentage_limit%"} += $pgb_connection_info{database}{$d} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; delete $infos{"Sum connections < $pie_percentage_limit%"}; } } $drawn_graphs{pgb_databaseconnections_graph} = &jqplot_piegraph($graphid++, 'graph_pgb_databaseconnections', 'Connections per database', %infos); $total_count = &comma_numbers($total_count); print $fh qq{

Connections per database

Key values

  • $main_database[0] Main Database
  • $total_count connections Total
$drawn_graphs{pgb_databaseconnections_graph}
$conn_database_info
Database User Count
}; delete $drawn_graphs{pgb_databaseconnections_graph}; } sub print_simultaneous_session { my $curdb = shift; my $session_peak = 0; my $session_peak_date = ''; foreach (sort {$overall_stat{$curdb}{'peak'}{$b}{session} <=> $overall_stat{$curdb}{'peak'}{$a}{session}} keys %{$overall_stat{$curdb}{'peak'}}) { $session_peak = &comma_numbers($overall_stat{$curdb}{'peak'}{$_}{session}); $session_peak_date = $_ if ($session_peak); last; } print $fh qq{

Simultaneous sessions

Key values

  • $session_peak sessions Session Peak
  • $session_peak_date Date
$drawn_graphs{sessionspersecond_graph}
}; delete $drawn_graphs{sessionspersecond_graph}; } sub print_simultaneous_pgb_session { my $session_peak = 0; my $session_peak_date = ''; foreach (sort {$pgb_overall_stat{'peak'}{$b}{session} <=> $pgb_overall_stat{'peak'}{$a}{session}} keys %{$pgb_overall_stat{'peak'}}) { $session_peak = &comma_numbers($pgb_overall_stat{'peak'}{$_}{session}); $session_peak_date = $_; last; } print $fh qq{

Simultaneous sessions

Key values

  • $session_peak sessions Session Peak
  • $session_peak_date Date
$drawn_graphs{pgb_sessionspersecond_graph}
}; delete $drawn_graphs{pgb_sessionspersecond_graph}; } sub print_histogram_session_times { my $curdb = shift; my %data = (); my $histogram_info = ''; my $most_range = ''; my $most_range_value = ''; for (my $i = 1; $i <= $#histogram_session_time; $i++) { $histogram_info .= "" . &convert_time($histogram_session_time[$i-1]) . '-' . &convert_time($histogram_session_time[$i]) . "" . &comma_numbers($overall_stat{$curdb}{histogram}{session_time}{$histogram_session_time[$i-1]}) . "" . sprintf("%0.2f", ($overall_stat{$curdb}{histogram}{session_time}{$histogram_session_time[$i-1]} * 100) / ($overall_stat{$curdb}{histogram}{session_total}||1)) . "%"; $data{"$histogram_session_time[$i-1]-$histogram_session_time[$i]ms"} = ($overall_stat{$curdb}{histogram}{session_time}{$histogram_session_time[$i-1]} || 0); if ($overall_stat{$curdb}{histogram}{session_time}{$histogram_session_time[$i-1]} > $most_range_value) { $most_range = "$histogram_session_time[$i-1]-$histogram_session_time[$i]ms"; $most_range_value = $overall_stat{$curdb}{histogram}{session_time}{$histogram_session_time[$i-1]}; } } if ($graph) { if ($overall_stat{$curdb}{histogram}{session_total} > 0) { $histogram_info .= " > " . &convert_time($histogram_session_time[-1]) . "" . &comma_numbers($overall_stat{$curdb}{histogram}{session_time}{'-1'}) . "" . sprintf("%0.2f", ($overall_stat{$curdb}{histogram}{session_time}{'-1'} * 100) / ($overall_stat{$curdb}{histogram}{session_total}||1)) . "%"; $data{"> $histogram_session_time[-1]ms"} = ($overall_stat{$curdb}{histogram}{session_time}{"-1"} || 0); if ($overall_stat{$curdb}{histogram}{session_time}{"-1"} > $most_range_value) { $most_range = "> $histogram_session_time[-1]ms"; $most_range_value = $overall_stat{$curdb}{histogram}{session_time}{"-1"}; } $drawn_graphs{histogram_session_times_graph} = &jqplot_duration_histograph($graphid++, 'graph_histogram_session_times', 'Sessions', \@histogram_session_time, %data); } else { $histogram_info = qq{$NODATA}; $drawn_graphs{histogram_session_times_graph} = qq{$NODATA}; } } else { $histogram_info = qq{$NODATA}; $drawn_graphs{histogram_session_times_graph} = qq{$NODATA}; } $most_range_value = &comma_numbers($most_range_value) if ($most_range_value); print $fh qq{

Histogram of session times

Key values

  • $most_range_value $most_range duration
$drawn_graphs{histogram_session_times_graph}
$histogram_info
Range Count Percentage
}; delete $drawn_graphs{histogram_session_times_graph}; } sub print_histogram_pgb_session_times { my %data = (); my $histogram_info = ''; my $most_range = ''; my $most_range_value = ''; for (my $i = 1; $i <= $#histogram_session_time; $i++) { $histogram_info .= "" . &convert_time($histogram_session_time[$i-1]) . '-' . &convert_time($histogram_session_time[$i]) . "" . &comma_numbers($pgb_overall_stat{histogram}{session_time}{$histogram_session_time[$i-1]}) . "" . sprintf("%0.2f", ($pgb_overall_stat{histogram}{session_time}{$histogram_session_time[$i-1]} * 100) / ($pgb_overall_stat{histogram}{session_total}||1)) . "%"; $data{"$histogram_session_time[$i-1]-$histogram_session_time[$i]ms"} = ($pgb_overall_stat{histogram}{session_time}{$histogram_session_time[$i-1]} || 0); if ($pgb_overall_stat{histogram}{session_time}{$histogram_session_time[$i-1]} > $most_range_value) { $most_range = "$histogram_session_time[$i-1]-$histogram_session_time[$i]ms"; $most_range_value = $pgb_overall_stat{histogram}{session_time}{$histogram_session_time[$i-1]}; } } if ($pgb_overall_stat{histogram}{session_total} > 0) { $histogram_info .= " > " . &convert_time($histogram_session_time[-1]) . "" . &comma_numbers($pgb_overall_stat{histogram}{session_time}{'-1'}) . "" . sprintf("%0.2f", ($pgb_overall_stat{histogram}{session_time}{'-1'} * 100) / ($pgb_overall_stat{histogram}{session_total}||1)) . "%"; $data{"> $histogram_session_time[-1]ms"} = ($pgb_overall_stat{histogram}{session_time}{"-1"} || 0); if ($pgb_overall_stat{histogram}{session_time}{"-1"} > $most_range_value) { $most_range = "> $histogram_session_time[-1]ms"; $most_range_value = $pgb_overall_stat{histogram}{session_time}{"-1"}; } $drawn_graphs{pgb_histogram_session_times_graph} = &jqplot_duration_histograph($graphid++, 'graph_pgb_histogram_session_times', 'Sessions', \@histogram_session_time, %data); } else { $histogram_info = qq{$NODATA}; $drawn_graphs{pgb_histogram_session_times_graph} = qq{$NODATA}; } $most_range_value = &comma_numbers($most_range_value) if ($most_range_value); print $fh qq{

Histogram of session times

Key values

  • $most_range_value $most_range duration
$drawn_graphs{pgb_histogram_session_times_graph}
$histogram_info
Range Count Percentage
}; delete $drawn_graphs{pgb_histogram_session_times_graph}; } sub print_user_session { my $curdb = shift; my %infos = (); my $total_count = 0; my $c = 0; my $sess_user_info = ''; my @main_user = ('unknown',0); foreach my $u (sort keys %{$session_info{$curdb}{user}}) { $session_info{user}{$u}{count} ||= 1; $sess_user_info .= "$u" . &comma_numbers($session_info{user}{$u}{count}) . "" . &convert_time($session_info{user}{$u}{duration}) . "" . &convert_time($session_info{user}{$u}{duration} / $session_info{user}{$u}{count}) . ""; $total_count += $session_info{$curdb}{user}{$u}{count}; if ($main_user[1] < $session_info{$curdb}{user}{$u}{count}) { $main_user[0] = $u; $main_user[1] = $session_info{$curdb}{user}{$u}{count}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$session_info{$curdb}{user}}) { if ((($session_info{$curdb}{user}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $session_info{$curdb}{user}{$d}{count} || 0; } else { $infos{"Sum sessions < $pie_percentage_limit%"} += $session_info{$curdb}{user}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; delete $infos{"Sum sessions < $pie_percentage_limit%"}; } } $drawn_graphs{usersessions_graph} = &jqplot_piegraph($graphid++, 'graph_usersessions', 'Sessions per user', %infos); $sess_user_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); print $fh qq{

Sessions per user

Key values

  • $main_user[0] Main User
  • $total_count sessions Total
$drawn_graphs{usersessions_graph}
$sess_user_info
User Count Total Duration Average Duration
}; delete $drawn_graphs{usersessions_graph}; } sub print_user_pgb_session { my %infos = (); my $total_count = 0; my $c = 0; my $sess_user_info = ''; my @main_user = ('unknown',0); foreach my $u (sort keys %{$pgb_session_info{user}}) { $sess_user_info .= "$u" . &comma_numbers($pgb_session_info{user}{$u}{count}) . "" . &convert_time($pgb_session_info{user}{$u}{duration}) . "" . &convert_time($pgb_session_info{user}{$u}{duration} / $pgb_session_info{user}{$u}{count}) . ""; $total_count += $pgb_session_info{user}{$u}{count}; if ($main_user[1] < $pgb_session_info{user}{$u}{count}) { $main_user[0] = $u; $main_user[1] = $pgb_session_info{user}{$u}{count}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$pgb_session_info{user}}) { if ((($pgb_session_info{user}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $pgb_session_info{user}{$d}{count} || 0; } else { $infos{"Sum sessions < $pie_percentage_limit%"} += $pgb_session_info{user}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; delete $infos{"Sum sessions < $pie_percentage_limit%"}; } } $drawn_graphs{pgb_usersessions_graph} = &jqplot_piegraph($graphid++, 'graph_pgb_usersessions', 'Sessions per user', %infos); $sess_user_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); print $fh qq{

Sessions per user

Key values

  • $main_user[0] Main User
  • $total_count sessions Total
$drawn_graphs{pgb_usersessions_graph}
$sess_user_info
User Count Total Duration Average Duration
}; delete $drawn_graphs{pgb_usersessions_graph}; } sub print_host_session { my $curdb = shift; my %infos = (); my $total_count = 0; my $c = 0; my $sess_host_info = ''; my @main_host = ('unknown',0); foreach my $h (sort keys %{$session_info{$curdb}{host}}) { $sess_host_info .= "$h" . &comma_numbers($session_info{$curdb}{host}{$h}{count}) . "" . &convert_time($session_info{$curdb}{host}{$h}{duration}) . "" . &convert_time($session_info{$curdb}{host}{$h}{duration} / $session_info{$curdb}{host}{$h}{count}) . ""; $total_count += $session_info{$curdb}{host}{$h}{count}; if ($main_host[1] < $session_info{$curdb}{host}{$h}{count}) { $main_host[0] = $h; $main_host[1] = $session_info{$curdb}{host}{$h}{count}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$session_info{$curdb}{host}}) { if ((($session_info{$curdb}{host}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $session_info{$curdb}{host}{$d}{count} || 0; } else { $infos{"Sum sessions < $pie_percentage_limit%"} += $session_info{$curdb}{host}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; delete $infos{"Sum sessions < $pie_percentage_limit%"}; } } $drawn_graphs{hostsessions_graph} = &jqplot_piegraph($graphid++, 'graph_hostsessions', 'Sessions per host', %infos); $sess_host_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); print $fh qq{

Sessions per host

Key values

  • $main_host[0] Main Host
  • $total_count sessions Total
$drawn_graphs{hostsessions_graph}
$sess_host_info
Host Count Total Duration Average Duration
}; delete $drawn_graphs{hostsessions_graph}; } sub print_host_pgb_session { my %infos = (); my $total_count = 0; my $c = 0; my $sess_host_info = ''; my @main_host = ('unknown',0); foreach my $h (sort keys %{$pgb_session_info{host}}) { $sess_host_info .= "$h" . &comma_numbers($pgb_session_info{host}{$h}{count}) . "" . &convert_time($pgb_session_info{host}{$h}{duration}) . "" . &convert_time($pgb_session_info{host}{$h}{duration} / $pgb_session_info{host}{$h}{count}) . ""; $total_count += $pgb_session_info{host}{$h}{count}; if ($main_host[1] < $pgb_session_info{host}{$h}{count}) { $main_host[0] = $h; $main_host[1] = $pgb_session_info{host}{$h}{count}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$pgb_session_info{host}}) { if ((($pgb_session_info{host}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $pgb_session_info{host}{$d}{count} || 0; } else { $infos{"Sum sessions < $pie_percentage_limit%"} += $pgb_session_info{host}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; delete $infos{"Sum sessions < $pie_percentage_limit%"}; } } $drawn_graphs{pgb_hostsessions_graph} = &jqplot_piegraph($graphid++, 'graph_pgb_hostsessions', 'Sessions per host', %infos); $sess_host_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); print $fh qq{

Sessions per host

Key values

  • $main_host[0] Main Host
  • $total_count sessions Total
$drawn_graphs{pgb_hostsessions_graph}
$sess_host_info
Host Count Total Duration Average Duration
}; delete $drawn_graphs{pgb_hostsessions_graph}; } sub print_app_session { my $curdb = shift; my %infos = (); my $total_count = 0; my $c = 0; my $sess_app_info = ''; my @main_app = ('unknown',0); foreach my $h (sort keys %{$session_info{$curdb}{app}}) { $sess_app_info .= "$h" . &comma_numbers($session_info{$curdb}{app}{$h}{count}) . "" . &convert_time($session_info{$curdb}{app}{$h}{duration}) . "" . &convert_time($session_info{$curdb}{app}{$h}{duration} / $session_info{$curdb}{app}{$h}{count}) . ""; $total_count += $session_info{$curdb}{app}{$h}{count}; if ($main_app[1] < $session_info{$curdb}{app}{$h}{count}) { $main_app[0] = $h; $main_app[1] = $session_info{$curdb}{app}{$h}{count}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$session_info{$curdb}{app}}) { if ((($session_info{$curdb}{app}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $session_info{$curdb}{app}{$d}{count} || 0; } else { $infos{"Sum sessions < $pie_percentage_limit%"} += $session_info{$curdb}{app}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; delete $infos{"Sum sessions < $pie_percentage_limit%"}; } } $drawn_graphs{appsessions_graph} = &jqplot_piegraph($graphid++, 'graph_appsessions', 'Sessions per application', %infos); $sess_app_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); print $fh qq{

Sessions per application

Key values

  • $main_app[0] Main Host
  • $total_count sessions Total
$drawn_graphs{appsessions_graph}
$sess_app_info
Application Count Total Duration Average Duration
}; delete $drawn_graphs{appsessions_graph}; } sub print_database_session { my $curdb = shift; my %infos = (); my $total_count = 0; my $sess_database_info = ''; my @main_database = ('unknown',0); foreach my $d (sort keys %{$session_info{$curdb}{database}}) { $sess_database_info .= "$d" . &comma_numbers($session_info{$curdb}{database}{$d}{count}) . "" . &convert_time($session_info{$curdb}{database}{$d}{duration}) . "" . &convert_time($session_info{$curdb}{database}{$d}{duration} / $session_info{$curdb}{database}{$d}{count}) . ""; $total_count += $session_info{$curdb}{database}{$d}{count}; if ($main_database[1] < $session_info{$curdb}{database}{$d}{count}) { $main_database[0] = $d; $main_database[1] = $session_info{$curdb}{database}{$d}{count}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$session_info{$curdb}{database}}) { if ((($session_info{$curdb}{database}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $session_info{$curdb}{database}{$d}{count} || 0; } else { $infos{"Sum sessions < $pie_percentage_limit%"} += $session_info{$curdb}{database}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; delete $infos{"Sum sessions < $pie_percentage_limit%"}; } } $drawn_graphs{databasesessions_graph} = &jqplot_piegraph($graphid++, 'graph_databasesessions', 'Sessions per database', %infos); $sess_database_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); print $fh qq{

Sessions per database

Key values

  • $main_database[0] Main Database
  • $total_count sessions Total
$drawn_graphs{databasesessions_graph}
$sess_database_info
Database User Count Total Duration Average Duration
}; delete $drawn_graphs{databasesessions_graph}; } sub print_database_pgb_session { my %infos = (); my $total_count = 0; my $sess_database_info = ''; my @main_database = ('unknown',0); foreach my $d (sort keys %{$pgb_session_info{database}}) { $sess_database_info .= "$d" . &comma_numbers($pgb_session_info{database}{$d}{count}) . "" . &convert_time($pgb_session_info{database}{$d}{duration}) . "" . &convert_time($pgb_session_info{database}{$d}{duration} / $pgb_session_info{database}{$d}{count}) . ""; $total_count += $pgb_session_info{database}{$d}{count}; if ($main_database[1] < $pgb_session_info{database}{$d}{count}) { $main_database[0] = $d; $main_database[1] = $pgb_session_info{database}{$d}{count}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$pgb_session_info{database}}) { if ((($pgb_session_info{database}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $pgb_session_info{database}{$d}{count} || 0; } else { $infos{"Sum sessions < $pie_percentage_limit%"} += $pgb_session_info{database}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; delete $infos{"Sum sessions < $pie_percentage_limit%"}; } } $drawn_graphs{pgb_databasesessions_graph} = &jqplot_piegraph($graphid++, 'graph_pgb_databasesessions', 'Sessions per database', %infos); $sess_database_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); print $fh qq{

Sessions per database

Key values

  • $main_database[0] Main Database
  • $total_count sessions Total
$drawn_graphs{pgb_databasesessions_graph}
$sess_database_info
Database User Count Total Duration Average Duration
}; delete $drawn_graphs{pgb_databasesessions_graph}; } sub print_checkpoint { my $curdb = shift; # checkpoint my %graph_data = (); if ($graph) { foreach my $tm (sort {$a <=> $b} keys %{$per_minute_info{$curdb}}) { $tm =~ /(\d{4})(\d{2})(\d{2})/; my $y = $1 - 1900; my $mo = $2 - 1; my $d = $3; foreach my $h ("00" .. "23") { next if (!exists $per_minute_info{$curdb}{$tm}{$h}); my %chk_dataavg = (); my %t_dataavg = (); my %v_dataavg = (); foreach my $m ("00" .. "59") { next if (!exists $per_minute_info{$curdb}{$tm}{$h}{$m}); my $rd = &average_per_minutes($m, $avg_minutes); if ($checkpoint_info{wbuffer}) { $chk_dataavg{wbuffer}{"$rd"} = 0 if (!exists $chk_dataavg{wbuffer}{"$rd"}); $chk_dataavg{file_added}{"$rd"} = 0 if (!exists $chk_dataavg{file_added}{"$rd"}); $chk_dataavg{file_removed}{"$rd"} = 0 if (!exists $chk_dataavg{file_removed}{"$rd"}); $chk_dataavg{file_recycled}{"$rd"} = 0 if (!exists $chk_dataavg{file_recycled}{"$rd"}); if (exists $per_minute_info{$curdb}{$tm}{$h}{$m}{checkpoint}) { $chk_dataavg{wbuffer}{"$rd"} += ($per_minute_info{$curdb}{$tm}{$h}{$m}{checkpoint}{wbuffer} || 0); $chk_dataavg{file_added}{"$rd"} += ($per_minute_info{$curdb}{$tm}{$h}{$m}{checkpoint}{file_added} || 0); $chk_dataavg{file_removed}{"$rd"} += ($per_minute_info{$curdb}{$tm}{$h}{$m}{checkpoint}{file_removed} || 0); $chk_dataavg{file_recycled}{"$rd"} += ($per_minute_info{$curdb}{$tm}{$h}{$m}{checkpoint}{file_recycled} || 0); } } if (exists $checkpoint_info{distance} || exists $checkpoint_info{estimate}) { $chk_dataavg{distance}{"$rd"} = 0 if (!exists $chk_dataavg{distance}{"$rd"}); $chk_dataavg{estimate}{"$rd"} = 0 if (!exists $chk_dataavg{estimate}{"$rd"}); if (exists $per_minute_info{$curdb}{$tm}{$h}{$m}{checkpoint}) { $chk_dataavg{distance}{"$rd"} += ($per_minute_info{$curdb}{$tm}{$h}{$m}{checkpoint}{distance} || 0) * 1000; $chk_dataavg{distance_count}{"$rd"} += ($per_minute_info{$curdb}{$tm}{$h}{$m}{checkpoint}{distance_count} || 1); $chk_dataavg{estimate}{"$rd"} += ($per_minute_info{$curdb}{$tm}{$h}{$m}{checkpoint}{estimate} || 0) * 1000; } } } foreach my $rd (@avgs) { my $t = timegm_nocheck(0, $rd, $h, $d, $mo, $y) * 1000; $t += ($timezone*1000); next if ($t < $t_min); last if ($t > $t_max); # Average of written checkpoint buffers and wal files if (exists $chk_dataavg{wbuffer}) { $graph_data{wbuffer} .= "[$t, " . ($chk_dataavg{wbuffer}{"$rd"} || 0) . "],"; $graph_data{file_added} .= "[$t, " . ($chk_dataavg{file_added}{"$rd"} || 0) . "],"; $graph_data{file_removed} .= "[$t, " . ($chk_dataavg{file_removed}{"$rd"} || 0) . "],"; $graph_data{file_recycled} .= "[$t, " . ($chk_dataavg{file_recycled}{"$rd"} || 0) . "],"; } if (exists $chk_dataavg{distance} || $chk_dataavg{estimate}) { $graph_data{distance} .= "[$t, " . int(($chk_dataavg{distance}{"$rd"}/($chk_dataavg{distance_count}{"$rd"} || 1)) || 0) . "],"; $graph_data{estimate} .= "[$t, " . int(($chk_dataavg{estimate}{"$rd"}/($chk_dataavg{distance_count}{"$rd"} || 1)) || 0) . "],"; } } } } foreach (keys %graph_data) { $graph_data{$_} =~ s/,$//; } } # Checkpoint buffers and files $drawn_graphs{checkpointwritebuffers_graph} = &jqplot_linegraph($graphid++, 'checkpointwritebuffers_graph', $graph_data{wbuffer}, '', '', 'Checkpoint write buffers (' . $avg_minutes . ' minutes period)', 'Buffers', 'Write buffers', '', '' ); $drawn_graphs{checkpointfiles_graph} = &jqplot_linegraph($graphid++, 'checkpointfiles_graph', $graph_data{file_added}, $graph_data{file_removed}, $graph_data{file_recycled}, 'Checkpoint Wal files usage (' . $avg_minutes . ' minutes period)', 'Number of files', 'Added', 'Removed', 'Recycled' ); $drawn_graphs{checkpointdistance_graph} = &jqplot_linegraph($graphid++, 'checkpointdistance_graph', $graph_data{distance}, $graph_data{estimate}, '', 'Checkpoint mean distance and estimate (' . $avg_minutes . ' minutes period)', 'Number of bytes', 'distance', 'estimate' ); my $checkpoint_wbuffer_peak = 0; my $checkpoint_wbuffer_peak_date = ''; foreach (sort { $overall_checkpoint{'peak'}{$b}{checkpoint_wbuffer} <=> $overall_checkpoint{'peak'}{$a}{checkpoint_wbuffer} } keys %{$overall_checkpoint{'peak'}}) { $checkpoint_wbuffer_peak = &comma_numbers($overall_checkpoint{'peak'}{$_}{checkpoint_wbuffer}); $checkpoint_wbuffer_peak_date = $_; last; } my $walfile_usage_peak = 0; my $walfile_usage_peak_date = ''; foreach (sort { $overall_checkpoint{'peak'}{$b}{walfile_usage} <=> $overall_checkpoint{'peak'}{$a}{walfile_usage} } keys %{$overall_checkpoint{'peak'}}) { $walfile_usage_peak = &comma_numbers($overall_checkpoint{'peak'}{$_}{walfile_usage}); $walfile_usage_peak_date = $_; last; } my $checkpoint_distance_peak = 0; my $checkpoint_distance_peak_date = ''; foreach (sort { $overall_checkpoint{'peak'}{$b}{distance} <=> $overall_checkpoint{'peak'}{$a}{distance} } keys %{$overall_checkpoint{'peak'}}) { $checkpoint_distance_peak = &comma_numbers(sprintf("%.2f", $overall_checkpoint{'peak'}{$_}{distance}/1024)); $checkpoint_distance_peak_date = $_; last; } print $fh qq{

Checkpoints / Restartpoints

Checkpoints Buffers

Key values

  • $checkpoint_wbuffer_peak buffers Checkpoint Peak
  • $checkpoint_wbuffer_peak_date Date
  • $overall_checkpoint{checkpoint_write} seconds Highest write time
  • $overall_checkpoint{checkpoint_sync} seconds Sync time
$drawn_graphs{checkpointwritebuffers_graph}
}; delete $drawn_graphs{checkpointwritebuffers_graph}; print $fh qq{

Checkpoints Wal files

Key values

  • $walfile_usage_peak files Wal files usage Peak
  • $walfile_usage_peak_date Date
$drawn_graphs{checkpointfiles_graph}
}; delete $drawn_graphs{checkpointfiles_graph}; print $fh qq{

Checkpoints distance

Key values

  • $checkpoint_distance_peak Mo Distance Peak
  • $checkpoint_distance_peak_date Date
$drawn_graphs{checkpointdistance_graph}
}; delete $drawn_graphs{checkpointdistance_graph}; my $buffers = ''; my $files = ''; my $warnings = ''; my $distance = ''; foreach my $d (sort {$a <=> $b} keys %{$per_minute_info{$curdb}}) { $d =~ /^\d{4}(\d{2})(\d{2})$/; my $zday = "$abbr_month{$1} $2"; foreach my $h (sort {$a <=> $b} keys %{$per_minute_info{$curdb}{$d}}) { $buffers .= "$zday$h"; $files .= "$zday$h"; $warnings .= "$zday$h"; $distance .= "$zday$h"; $zday = ''; my %cinf = (); my %rinf = (); my %cainf = (); my %rainf = (); my %dinf = (); foreach my $m (keys %{$per_minute_info{$curdb}{$d}{$h}}) { if (exists $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}) { $cinf{wbuffer} += $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{wbuffer}; $cinf{file_added} += $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{file_added}; $cinf{file_removed} += $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{file_removed}; $cinf{file_recycled} += $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{file_recycled}; $cinf{write} += $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{write}; $cinf{sync} += $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{sync}; $cinf{total} += $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{total}; $cainf{sync_files} += $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{sync_files}; $cainf{sync_avg} += $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{sync_avg}; $cainf{sync_longest} = $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{sync_longest} if ($per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{sync_longest} > $cainf{sync_longest}); } if (exists $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{warning}) { $cinf{warning} += $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{warning}; $cinf{warning_seconds} += $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{warning_seconds}; } if (exists $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{distance} || $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{estimate}) { $dinf{distance}{sum} += $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{distance}; $dinf{estimate}{sum} += $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{estimate}; $dinf{distance}{count} += $per_minute_info{$curdb}{$d}{$h}{$m}{checkpoint}{distance_count}; } } if (scalar keys %cinf) { $buffers .= "" . &comma_numbers($cinf{wbuffer}) . "" . &comma_numbers($cinf{write}) . 's' . "" . &comma_numbers($cinf{sync}) . 's' . "" . &comma_numbers($cinf{total}) . 's' . ""; $files .= "" . &comma_numbers($cinf{file_added}) . "" . &comma_numbers($cinf{file_removed}) . "" . &comma_numbers($cinf{file_recycled}) . "" . &comma_numbers($cainf{sync_files}) . "" . &comma_numbers($cainf{sync_longest}) . 's' . "" . &comma_numbers($cainf{sync_avg}) . 's' . ""; } else { $buffers .= "00s0s0s"; $files .= "00000s0s"; } if (exists $cinf{warning}) { $warnings .= "" . &comma_numbers($cinf{warning}) . "" . &comma_numbers(sprintf( "%.2f", ($cinf{warning_seconds} || 0) / ($cinf{warning} || 1))) . "s"; } else { $warnings .= "00s"; } if (exists $dinf{distance} || $dinf{estimate}) { $distance .= "" . &comma_numbers(sprintf( "%.2f", $dinf{distance}{sum}/($dinf{distance}{count}||1))) . " kB" . &comma_numbers(sprintf( "%.2f", $dinf{estimate}{sum}/($dinf{distance}{count}||1))) . " kB"; } else { $distance .= "00"; } } } $buffers = qq{$NODATA} if (!$buffers); $files = qq{$NODATA} if (!$files); $warnings = qq{$NODATA} if (!$warnings); $distance = qq{$NODATA} if (!$distance); print $fh qq{

Checkpoints Activity

$buffers
Day Hour Written buffers Write time Sync time Total time
$files
Day Hour Added Removed Recycled Synced files Longest sync Average sync
$warnings
Day Hour Count Avg time (sec)
$distance
Day Hour Mean distance Mean estimate
Back to the top of the Checkpoint Activity table
}; } sub print_checkpoint_cause { my $curdb = shift; my %infos = (); my $total_count = 0; my $chkp_info = ''; my @main_checkpoint = ('unknown',0); foreach my $c (sort { $checkpoint_info{starting}{$b} <=> $checkpoint_info{starting}{$a} } keys %{$checkpoint_info{starting}}) { $chkp_info .= "$c" . &comma_numbers($checkpoint_info{starting}{$c}) . ""; $total_count += $checkpoint_info{starting}{$c}; if ($main_checkpoint[1] < $checkpoint_info{starting}{$c}) { $main_checkpoint[0] = $c; $main_checkpoint[1] = $checkpoint_info{starting}{$c}; } } $chkp_info .= "Total" . &comma_numbers($total_count) . ""; if ($graph) { my @small = (); foreach my $c (sort keys %{$checkpoint_info{starting}}) { if ((($checkpoint_info{starting}{$c} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$c} = $checkpoint_info{starting}{$c} || 0; } else { $infos{"Sum checkpoints cause < $pie_percentage_limit%"} += $checkpoint_info{starting}{$c} || 0; push(@small, $c); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum checkpoints cause < $pie_percentage_limit%"}; delete $infos{"Sum checkpoints cause < $pie_percentage_limit%"}; } } $drawn_graphs{checkpointcause_graph} = &jqplot_piegraph($graphid++, 'graph_checkpointcause', 'Checkpoint causes', %infos); $total_count = &comma_numbers($total_count); my $database = ''; if ($main_checkpoint[0] =~ s/^([^\.]+)\.//) { $database = $1; } $chkp_info = qq{$NODATA} if (!$total_count); print $fh qq{

Checkpoint causes

Key values

  • $main_checkpoint[0] ($main_checkpoint[1]) Main checkpoint cause
  • $total_count checkpoints Total
$drawn_graphs{checkpointcause_graph}
$chkp_info
Cause Number of checkpoints
}; delete $drawn_graphs{checkpointcause_graph}; } sub print_temporary_file { my $curdb = shift; # checkpoint my %graph_data = (); if ($graph) { foreach my $tm (sort {$a <=> $b} keys %{$per_minute_info{$curdb}}) { $tm =~ /(\d{4})(\d{2})(\d{2})/; my $y = $1 - 1900; my $mo = $2 - 1; my $d = $3; foreach my $h ("00" .. "23") { next if (!exists $per_minute_info{$curdb}{$tm}{$h}); my %chk_dataavg = (); my %t_dataavg = (); my %v_dataavg = (); foreach my $m ("00" .. "59") { next if (!exists $per_minute_info{$curdb}{$tm}{$h}{$m}); my $rd = &average_per_minutes($m, $avg_minutes); if ($tempfile_info{$curdb}{count}) { $t_dataavg{size}{"$rd"} = 0 if (!exists $t_dataavg{size}{"$rd"}); $t_dataavg{count}{"$rd"} = 0 if (!exists $t_dataavg{count}{"$rd"}); if (exists $per_minute_info{$curdb}{$tm}{$h}{$m}{'tempfile'}) { $t_dataavg{size}{"$rd"} += ($per_minute_info{$curdb}{$tm}{$h}{$m}{'tempfile'}{size} || 0); $t_dataavg{count}{"$rd"} += ($per_minute_info{$curdb}{$tm}{$h}{$m}{'tempfile'}{count} || 0); } } } foreach my $rd (@avgs) { my $t = timegm_nocheck(0, $rd, $h, $d, $mo, $y) * 1000; $t += ($timezone*1000); next if ($t < $t_min); last if ($t > $t_max); if (exists $t_dataavg{size}) { $graph_data{size} .= "[$t, " . ($t_dataavg{size}{"$rd"} || 0) . "],"; $graph_data{count} .= "[$t, " . ($t_dataavg{count}{"$rd"} || 0) . "],"; } } } } foreach (keys %graph_data) { $graph_data{$_} =~ s/,$//; } } # Temporary file size $drawn_graphs{temporarydata_graph} = &jqplot_linegraph($graphid++, 'temporarydata_graph', $graph_data{size}, '', '', 'Size of temporary files (' . $avg_minutes . ' minutes period)', 'Size of files', 'Size of files' ); # Temporary file number $drawn_graphs{temporaryfile_graph} = &jqplot_linegraph($graphid++, 'temporaryfile_graph', $graph_data{count}, '', '', 'Number of temporary files (' . $avg_minutes . ' minutes period)', 'Number of files', 'Number of files' ); my $tempfile_size_peak = 0; my $tempfile_size_peak_date = ''; foreach (sort {$overall_stat{$curdb}{'peak'}{$b}{tempfile_size} <=> $overall_stat{$curdb}{'peak'}{$a}{tempfile_size}} keys %{$overall_stat{$curdb}{'peak'}}) { $tempfile_size_peak = &pretty_print_size($overall_stat{$curdb}{'peak'}{$_}{tempfile_size}); $tempfile_size_peak_date = $_ if ($tempfile_size_peak); last; } print $fh qq{

Temporary Files

Size of temporary files

Key values

  • $tempfile_size_peak Temp Files size Peak
  • $tempfile_size_peak_date Date
$drawn_graphs{temporarydata_graph}
}; delete $drawn_graphs{temporarydata_graph}; my $tempfile_count_peak = 0; my $tempfile_count_peak_date = ''; foreach (sort {$overall_stat{$curdb}{'peak'}{$b}{tempfile_count} <=> $overall_stat{$curdb}{'peak'}{$a}{tempfile_count}} keys %{$overall_stat{$curdb}{'peak'}}) { $tempfile_count_peak = &comma_numbers($overall_stat{$curdb}{'peak'}{$_}{tempfile_count}); $tempfile_count_peak_date = $_ if ($tempfile_count_peak); last; } print $fh qq{

Number of temporary files

Key values

  • $tempfile_count_peak per second Temp Files Peak
  • $tempfile_count_peak_date Date
$drawn_graphs{temporaryfile_graph}
}; delete $drawn_graphs{temporaryfile_graph}; my $tempfiles_activity = ''; foreach my $d (sort {$a <=> $b} keys %{$per_minute_info{$curdb}}) { $d =~ /^\d{4}(\d{2})(\d{2})$/; my $zday = "$abbr_month{$1} $2"; foreach my $h (sort {$a <=> $b} keys %{$per_minute_info{$curdb}{$d}}) { $tempfiles_activity .= "$zday$h"; $zday = ""; my %tinf = (); foreach my $m (keys %{$per_minute_info{$curdb}{$d}{$h}}) { if (exists $per_minute_info{$curdb}{$d}{$h}{$m}{'tempfile'}) { $tinf{size} += $per_minute_info{$curdb}{$d}{$h}{$m}{'tempfile'}{size}; $tinf{count} += $per_minute_info{$curdb}{$d}{$h}{$m}{'tempfile'}{count}; } } if (scalar keys %tinf) { my $temp_average = &pretty_print_size(sprintf("%.2f", $tinf{size} / $tinf{count})); $tempfiles_activity .= "" . &comma_numbers($tinf{count}) . "" . &pretty_print_size($tinf{size}) . "" . "$temp_average"; } else { $tempfiles_activity .= "000"; } } } $tempfiles_activity = qq{$NODATA} if (!$tempfiles_activity); print $fh qq{

Temporary Files Activity

$tempfiles_activity
Day Hour Count Total size Average size
Back to the top of the Temporary Files Activity table
}; } sub print_cancelled_queries { my $curdb = shift; my %graph_data = (); if ($graph) { foreach my $tm (sort {$a <=> $b} keys %{$per_minute_info{$curdb}}) { $tm =~ /(\d{4})(\d{2})(\d{2})/; my $y = $1 - 1900; my $mo = $2 - 1; my $d = $3; foreach my $h ("00" .. "23") { next if (!exists $per_minute_info{$curdb}{$tm}{$h}); my %chk_dataavg = (); my %t_dataavg = (); my %v_dataavg = (); foreach my $m ("00" .. "59") { next if (!exists $per_minute_info{$curdb}{$tm}{$h}{$m}); my $rd = &average_per_minutes($m, $avg_minutes); if ($cancelled_info{count}) { $t_dataavg{count}{"$rd"} = 0 if (!exists $t_dataavg{count}{"$rd"}); if (exists $per_minute_info{$curdb}{$tm}{$h}{$m}{cancelled}) { $t_dataavg{count}{"$rd"} += ($per_minute_info{$curdb}{$tm}{$h}{$m}{cancelled}{count} || 0); } } } foreach my $rd (@avgs) { my $t = timegm_nocheck(0, $rd, $h, $d, $mo, $y) * 1000; $t += ($timezone*1000); next if ($t < $t_min); last if ($t > $t_max); if (exists $t_dataavg{count}) { $graph_data{count} .= "[$t, " . ($t_dataavg{count}{"$rd"} || 0) . "],"; } } } } foreach (keys %graph_data) { $graph_data{$_} =~ s/,$//; } } # Number of cancelled queries graph $drawn_graphs{cancelledqueries_graph} = &jqplot_linegraph($graphid++, 'cancelledqueries_graph', $graph_data{count}, '', '', 'Number of cancelled queries (' . $avg_minutes . ' minutes period)', 'Number of cancellation', 'Number of cancellation' ); my $cancelled_count_peak = 0; my $cancelled_count_peak_date = ''; foreach (sort {$overall_stat{$curdb}{'peak'}{$b}{cancelled_count} <=> $overall_stat{$curdb}{'peak'}{$a}{cancelled_count}} keys %{$overall_stat{$curdb}{'peak'}}) { $cancelled_count_peak = &comma_numbers($overall_stat{$curdb}{'peak'}{$_}{cancelled_count}); $cancelled_count_peak_date = $_; last; } print $fh qq{

Number of cancelled queries

Key values

  • $cancelled_count_peak per second Cancelled query Peak
  • $cancelled_count_peak_date Date
$drawn_graphs{cancelledqueries_graph}
}; delete $drawn_graphs{cancelledqueries_graph}; } sub print_analyze_per_table { my $curdb = shift; # ANALYZE stats per table my %infos = (); my $total_count = 0; my $analyze_info = ''; my @main_analyze = ('unknown',0); foreach my $t (sort { $autoanalyze_info{$curdb}{tables}{$b}{analyzes} <=> $autoanalyze_info{$curdb}{tables}{$a}{analyzes} } keys %{$autoanalyze_info{$curdb}{tables}}) { $analyze_info .= "$t" . $autoanalyze_info{$curdb}{tables}{$t}{analyzes} . ""; $total_count += $autoanalyze_info{$curdb}{tables}{$t}{analyzes}; if ($main_analyze[1] < $autoanalyze_info{$curdb}{tables}{$t}{analyzes}) { $main_analyze[0] = $t; $main_analyze[1] = $autoanalyze_info{$curdb}{tables}{$t}{analyzes}; } } $analyze_info .= "Total" . &comma_numbers($total_count) . ""; if ($graph) { my @small = (); foreach my $d (sort keys %{$autoanalyze_info{$curdb}{tables}}) { if ((($autoanalyze_info{$curdb}{tables}{$d}{analyzes} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $autoanalyze_info{$curdb}{tables}{$d}{analyzes} || 0; } else { $infos{"Sum analyzes < $pie_percentage_limit%"} += $autoanalyze_info{$curdb}{tables}{$d}{analyzes} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum analyzes < $pie_percentage_limit%"}; delete $infos{"Sum analyzes < $pie_percentage_limit%"}; } } $drawn_graphs{tableanalyzes_graph} = &jqplot_piegraph($graphid++, 'graph_tableanalyzes', 'Analyzes per tables', %infos); $total_count = &comma_numbers($total_count); my $database = ''; if ($main_analyze[0] =~ s/^([^\.]+)\.//) { $database = $1; } $analyze_info = qq{$NODATA} if (!$total_count); print $fh qq{

Analyzes per table

Key values

  • $main_analyze[0] ($main_analyze[1]) Main table analyzed (database $database)
  • $total_count analyzes Total
$drawn_graphs{tableanalyzes_graph}
$analyze_info
Table Number of analyzes
}; delete $drawn_graphs{tableanalyzes_graph}; } sub print_vacuum { my $curdb = shift; # checkpoint my %graph_data = (); foreach my $tm (sort {$a <=> $b} keys %{$per_minute_info{$curdb}}) { $tm =~ /(\d{4})(\d{2})(\d{2})/; my $y = $1 - 1900; my $mo = $2 - 1; my $d = $3; my $has_data = 0; foreach my $h ("00" .. "23") { next if (!exists $per_minute_info{$curdb}{$tm}{$h}); my %chk_dataavg = (); my %t_dataavg = (); my %v_dataavg = (); foreach my $m ("00" .. "59") { next if (!exists $per_minute_info{$curdb}{$tm}{$h}{$m}); my $rd = &average_per_minutes($m, $avg_minutes); $v_dataavg{acount}{"$rd"} = 0 if (!exists $v_dataavg{acount}{"$rd"}); $v_dataavg{vcount}{"$rd"} = 0 if (!exists $v_dataavg{vcount}{"$rd"}); if (exists $per_minute_info{$curdb}{$tm}{$h}{$m}{autovacuum}) { $v_dataavg{vcount}{"$rd"} += ($per_minute_info{$curdb}{$tm}{$h}{$m}{autovacuum}{count} || 0); } if (exists $per_minute_info{$curdb}{$tm}{$h}{$m}{autoanalyze}) { $v_dataavg{acount}{"$rd"} += ($per_minute_info{$curdb}{$tm}{$h}{$m}{autoanalyze}{count} || 0); } if ($v_dataavg{acount}{"$rd"} || $v_dataavg{vcount}{"$rd"}) { $has_data = 1; } } if ($has_data) { foreach my $rd (@avgs) { my $t = timegm_nocheck(0, $rd, $h, $d, $mo, $y) * 1000; $t += ($timezone*1000); next if ($t < $t_min); last if ($t > $t_max); if (exists $v_dataavg{vcount}{"$rd"}) { $graph_data{vcount} .= "[$t, " . ($v_dataavg{vcount}{"$rd"} || 0) . "],"; } if (exists $v_dataavg{acount}{"$rd"}) { $graph_data{acount} .= "[$t, " . ($v_dataavg{acount}{"$rd"} || 0) . "],"; } } } } } foreach (keys %graph_data) { $graph_data{$_} =~ s/,$//; } # VACUUMs vs ANALYZEs chart $drawn_graphs{autovacuum_graph} = $NODATA; if ($graph) { $drawn_graphs{autovacuum_graph} = &jqplot_linegraph($graphid++, 'autovacuum_graph', $graph_data{vcount}, $graph_data{acount}, '', 'Autovacuum actions (' . $avg_minutes . ' minutes period)', '', 'VACUUMs', 'ANALYZEs' ); } my $vacuum_size_peak = 0; my $vacuum_size_peak_date = ''; foreach (sort { $overall_stat{$curdb}{'peak'}{$b}{vacuum_size} <=> $overall_stat{$curdb}{'peak'}{$a}{vacuum_size} } keys %{$overall_stat{$curdb}{'peak'}}) { $vacuum_size_peak = &comma_numbers($overall_stat{$curdb}{'peak'}{$_}{vacuum_size}); $vacuum_size_peak_date = $_; last; } my $autovacuum_peak_system_usage_db = ''; if ($autovacuum_info{$curdb}{peak}{system_usage}{table} =~ s/^([^\.]+)\.//) { $autovacuum_peak_system_usage_db = $1; } my $autoanalyze_peak_system_usage_db = ''; if ($autoanalyze_info{$curdb}{peak}{system_usage}{table} =~ s/^([^\.]+)\.//) { $autoanalyze_peak_system_usage_db = $1; } $autovacuum_info{$curdb}{peak}{system_usage}{elapsed} ||= 0; $autoanalyze_info{$curdb}{peak}{system_usage}{elapsed} ||= 0; print $fh qq{

Vacuums

Vacuums / Analyzes Distribution

Key values

  • $autovacuum_info{$curdb}{peak}{system_usage}{elapsed} sec Highest CPU-cost vacuum
    Table $autovacuum_info{$curdb}{peak}{system_usage}{table}
    Database $autovacuum_peak_system_usage_db
  • $autovacuum_info{$curdb}{peak}{system_usage}{date} Date
  • $autoanalyze_info{$curdb}{peak}{system_usage}{elapsed} sec Highest CPU-cost analyze
    Table $autoanalyze_info{$curdb}{peak}{system_usage}{table}
    Database $autovacuum_peak_system_usage_db
  • $autoanalyze_info{$curdb}{peak}{system_usage}{date} Date
$drawn_graphs{autovacuum_graph}
}; delete $drawn_graphs{autovacuum_graph}; # ANALYZE stats per table &print_analyze_per_table($curdb); # VACUUM stats per table &print_vacuum_per_table($curdb); # Show tuples and pages removed per table &print_vacuum_tuple_removed($curdb); &print_vacuum_page_removed($curdb); my $vacuum_activity = ''; foreach my $d (sort {$a <=> $b} keys %{$per_minute_info{$curdb}}) { my $c = 1; $d =~ /^\d{4}(\d{2})(\d{2})$/; my $zday = "$abbr_month{$1} $2"; foreach my $h (sort {$a <=> $b} keys %{$per_minute_info{$curdb}{$d}}) { $vacuum_activity .= "$zday$h"; $zday = ""; my %ainf = (); foreach my $m (keys %{$per_minute_info{$curdb}{$d}{$h}}) { if (exists $per_minute_info{$curdb}{$d}{$h}{$m}{autovacuum}{count}) { $ainf{vcount} += $per_minute_info{$curdb}{$d}{$h}{$m}{autovacuum}{count}; } if (exists $per_minute_info{$curdb}{$d}{$h}{$m}{autoanalyze}{count}) { $ainf{acount} += $per_minute_info{$curdb}{$d}{$h}{$m}{autoanalyze}{count}; } } if (scalar keys %ainf) { $vacuum_activity .= "" . &comma_numbers($ainf{vcount}) . ""; } else { $vacuum_activity .= "0"; } if (scalar keys %ainf) { $vacuum_activity .= "" . &comma_numbers($ainf{acount}) . ""; } else { $vacuum_activity .= "0"; } } } $vacuum_activity = qq{$NODATA} if (!$vacuum_activity); print $fh qq{

Autovacuum Activity

$vacuum_activity
Day Hour VACUUMs ANALYZEs
Back to the top of the Autovacuum Activity table
}; } sub print_vacuum_per_table { my $curdb = shift; # VACUUM stats per table my $total_count = 0; my $total_idxscan = 0; my $total_hits = 0; my $total_misses = 0; my $total_dirtied = 0; my $total_pins = 0; my $total_frozen = 0; my $total_records = 0; my $total_full_page = 0; my $total_bytes = 0; my $vacuum_info = ''; my @main_vacuum = ('unknown',0); foreach my $t (sort { $autovacuum_info{$curdb}{tables}{$b}{vacuums} <=> $autovacuum_info{$curdb}{tables}{$a}{vacuums} } keys %{$autovacuum_info{$curdb}{tables}}) { $vacuum_info .= "$t" . &comma_numbers($autovacuum_info{$curdb}{tables}{$t}{vacuums}) . "" . &comma_numbers($autovacuum_info{$curdb}{tables}{$t}{idxscans}) . "" . &comma_numbers($autovacuum_info{$curdb}{tables}{$t}{hits}) . "" . &comma_numbers($autovacuum_info{$curdb}{tables}{$t}{missed}) . "" . &comma_numbers($autovacuum_info{$curdb}{tables}{$t}{dirtied}) . "" . &comma_numbers($autovacuum_info{$curdb}{tables}{$t}{skip_pins}) . "" . &comma_numbers($autovacuum_info{$curdb}{tables}{$t}{skip_frozen}) . "" . &comma_numbers($autovacuum_info{$curdb}{tables}{$t}{wal_record}) . "" . &comma_numbers($autovacuum_info{$curdb}{tables}{$t}{wal_full_page}) . "" . &comma_numbers($autovacuum_info{$curdb}{tables}{$t}{wal_bytes}) . ""; $total_count += $autovacuum_info{$curdb}{tables}{$t}{vacuums}; $total_idxscan += $autovacuum_info{$curdb}{tables}{$t}{idxscans}; $total_hits += $autovacuum_info{$curdb}{tables}{$t}{hits}; $total_misses += $autovacuum_info{$curdb}{tables}{$t}{misses}; $total_dirtied += $autovacuum_info{$curdb}{tables}{$t}{dirtied}; $total_pins += $autovacuum_info{$curdb}{tables}{$t}{skip_pins}; $total_frozen += $autovacuum_info{$curdb}{tables}{$t}{skip_frozen}; $total_records += $autovacuum_info{$curdb}{tables}{$t}{wal_record}; $total_full_page += $autovacuum_info{$curdb}{tables}{$t}{wal_full_page}; $total_bytes += $autovacuum_info{$curdb}{tables}{$t}{wal_bytes}; if ($main_vacuum[1] < $autovacuum_info{$curdb}{tables}{$t}{vacuums}) { $main_vacuum[0] = $t; $main_vacuum[1] = $autovacuum_info{$curdb}{tables}{$t}{vacuums}; } } $vacuum_info .= "Total" . &comma_numbers($total_count); $vacuum_info .= "" . &comma_numbers($total_idxscan); $vacuum_info .= "" . &comma_numbers($total_hits); $vacuum_info .= "" . &comma_numbers($total_misses); $vacuum_info .= "" . &comma_numbers($total_dirtied); $vacuum_info .= "" . &comma_numbers($total_pins); $vacuum_info .= "" . &comma_numbers($total_frozen); $vacuum_info .= "" . &comma_numbers($total_records); $vacuum_info .= "" . &comma_numbers($total_full_page); $vacuum_info .= "" . &comma_numbers($total_bytes); $vacuum_info .= ""; my %infos = (); my @small = (); foreach my $d (sort keys %{$autovacuum_info{$curdb}{tables}}) { if ((($autovacuum_info{$curdb}{tables}{$d}{vacuums} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $autovacuum_info{$curdb}{tables}{$d}{vacuums} || 0; } else { $infos{"Sum vacuums < $pie_percentage_limit%"} += $autovacuum_info{$curdb}{tables}{$d}{vacuums} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum vacuums < $pie_percentage_limit%"}; delete $infos{"Sum vacuums < $pie_percentage_limit%"}; } $drawn_graphs{tablevacuums_graph} = $NODATA; if ($graph) { $drawn_graphs{tablevacuums_graph} = &jqplot_piegraph($graphid++, 'graph_tablevacuums', 'Vacuums per tables', %infos); } $vacuum_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); my $database = ''; if ($main_vacuum[0] =~ s/^([^\.]+)\.//) { $database = $1; } print $fh qq{

Vacuums per table

Key values

  • $main_vacuum[0] ($main_vacuum[1]) Main table vacuumed on database $database
  • $total_count vacuums Total
$drawn_graphs{tablevacuums_graph}
$vacuum_info
 IndexBuffer usageSkippedWAL usage
Table Vacuums scans hits misses dirtied pins frozen records full page bytes
}; delete $drawn_graphs{tablevacuums_graph}; } sub print_vacuum_tuple_removed { my $curdb = shift; # VACUUM stats per table my $total_count = 0; my $total_idxscan = 0; my $total_tuple_remove = 0; my $total_tuple_remain = 0; my $total_tuple_notremovable = 0; my $total_page_remove = 0; my $total_page_remain = 0; my $vacuum_info = ''; my @main_tuple = ('unknown',0); foreach my $t (sort { $autovacuum_info{$curdb}{tables}{$b}{tuples}{removed} <=> $autovacuum_info{$curdb}{tables}{$a}{tuples}{removed} } keys %{$autovacuum_info{$curdb}{tables}}) { $vacuum_info .= "$t" . &comma_numbers($autovacuum_info{$curdb}{tables}{$t}{vacuums}) . "" . &comma_numbers($autovacuum_info{$curdb}{tables}{$t}{idxscans}) . "" . &comma_numbers($autovacuum_info{$curdb}{tables}{$t}{tuples}{removed}) . "" . &comma_numbers($autovacuum_info{$curdb}{tables}{$t}{tuples}{remain}) . "" . &comma_numbers($autovacuum_info{$curdb}{tables}{$t}{tuples}{notremovable}) . "" . &comma_numbers($autovacuum_info{$curdb}{tables}{$t}{pages}{removed}) . "" . &comma_numbers($autovacuum_info{$curdb}{tables}{$t}{pages}{remain}) . ""; $total_count += $autovacuum_info{$curdb}{tables}{$t}{vacuums}; $total_idxscan += $autovacuum_info{$curdb}{tables}{$t}{idxscans}; $total_tuple_remove += $autovacuum_info{$curdb}{tables}{$t}{tuples}{removed}; $total_tuple_remain += $autovacuum_info{$curdb}{tables}{$t}{tuples}{remain}; $total_tuple_notremovable += $autovacuum_info{$curdb}{tables}{$t}{tuples}{notremovable}; $total_page_remove += $autovacuum_info{$curdb}{tables}{$t}{pages}{removed}; $total_page_remain += $autovacuum_info{$curdb}{tables}{$t}{pages}{remain}; if ($main_tuple[1] < $autovacuum_info{$curdb}{tables}{$t}{tuples}{removed}) { $main_tuple[0] = $t; $main_tuple[1] = $autovacuum_info{$curdb}{tables}{$t}{tuples}{removed}; } } $vacuum_info .= "Total" . &comma_numbers($total_count); $vacuum_info .= "" . &comma_numbers($total_idxscan); $vacuum_info .= "" . &comma_numbers($total_tuple_remove); $vacuum_info .= "" . &comma_numbers($total_tuple_remain); $vacuum_info .= "" . &comma_numbers($total_tuple_notremovable); $vacuum_info .= "" . &comma_numbers($total_page_remove); $vacuum_info .= "" . &comma_numbers($total_page_remain); $vacuum_info .= ""; my %infos_tuple = (); my @small = (); foreach my $d (sort keys %{$autovacuum_info{$curdb}{tables}}) { if ((($autovacuum_info{$curdb}{tables}{$d}{tuples}{removed} * 100) / ($total_tuple_remove||1)) > $pie_percentage_limit) { $infos_tuple{$d} = $autovacuum_info{$curdb}{tables}{$d}{tuples}{removed} || 0; } else { $infos_tuple{"Sum tuples removed < $pie_percentage_limit%"} += $autovacuum_info{$curdb}{tables}{$d}{tuples}{removed} || 0; push(@small, $d); } } if ($#small == 0) { $infos_tuple{$small[0]} = $infos_tuple{"Sum tuples removed < $pie_percentage_limit%"}; delete $infos_tuple{"Sum tuples removed < $pie_percentage_limit%"}; } $drawn_graphs{tuplevacuums_graph} = $NODATA; if ($graph) { $drawn_graphs{tuplevacuums_graph} = &jqplot_piegraph($graphid++, 'graph_tuplevacuums', 'Tuples removed per tables', %infos_tuple); } $vacuum_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); my $database = ''; if ($main_tuple[0] =~ s/^([^\.]+)\.//) { $database = $1; } print $fh qq{

Tuples removed per table

Key values

  • $main_tuple[0] ($main_tuple[1]) Main table with removed tuples on database $database
  • $total_tuple_remove tuples Total removed
$drawn_graphs{tuplevacuums_graph}
$vacuum_info
 IndexTuplesPages
Table Vacuums scans removed remain not yet removable removed remain
}; delete $drawn_graphs{tuplevacuums_graph}; } sub print_vacuum_page_removed { my $curdb = shift; # VACUUM stats per table my $total_count = 0; my $total_idxscan = 0; my $total_tuple = 0; my $total_page = 0; my $vacuum_info = ''; my @main_tuple = ('unknown',0); my @main_page = ('unknown',0); my %infos_page = (); my @small = (); foreach my $t (sort { $autovacuum_info{$curdb}{tables}{$b}{pages}{removed} <=> $autovacuum_info{$curdb}{tables}{$a}{pages}{removed} } keys %{$autovacuum_info{$curdb}{tables}}) { $vacuum_info .= "$t" . $autovacuum_info{$curdb}{tables}{$t}{vacuums} . "" . $autovacuum_info{$curdb}{tables}{$t}{idxscans} . "" . $autovacuum_info{$curdb}{tables}{$t}{tuples}{removed} . "" . $autovacuum_info{$curdb}{tables}{$t}{pages}{removed} . ""; $total_count += $autovacuum_info{$curdb}{tables}{$t}{vacuums}; $total_idxscan += $autovacuum_info{$curdb}{tables}{$t}{idxscans}; $total_tuple += $autovacuum_info{$curdb}{tables}{$t}{tuples}{removed}; $total_page += $autovacuum_info{$curdb}{tables}{$t}{pages}{removed}; if ($main_page[1] < $autovacuum_info{$curdb}{tables}{$t}{pages}{removed}) { $main_page[0] = $t; $main_page[1] = $autovacuum_info{$curdb}{tables}{$t}{pages}{removed}; } if ($autovacuum_info{$curdb}{tables}{$t}{pages}{removed} > 0) { if ((($autovacuum_info{$curdb}{tables}{$t}{pages}{removed} * 100) / ($total_page || 1)) > $pie_percentage_limit) { $infos_page{$t} = $autovacuum_info{$curdb}{tables}{$t}{pages}{removed} || 0; } else { $infos_page{"Sum pages removed < $pie_percentage_limit%"} += $autovacuum_info{$curdb}{tables}{$t}{pages}{removed} || 0; push(@small, $t); } } } $vacuum_info .= "Total" . &comma_numbers($total_count) . "" . &comma_numbers($total_idxscan) . "" . &comma_numbers($total_tuple) . "" . &comma_numbers($total_page) . ""; if ($#small == 0) { $infos_page{$small[0]} = $infos_page{"Sum pages removed < $pie_percentage_limit%"}; delete $infos_page{"Sum pages removed < $pie_percentage_limit%"}; } $drawn_graphs{pagevacuums_graph} = $NODATA; if ($graph) { $drawn_graphs{pagevacuums_graph} = &jqplot_piegraph($graphid++, 'graph_pagevacuums', 'Pages removed per tables', %infos_page); } $vacuum_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); my $database = 'unknown'; if ($main_page[0] =~ s/^([^\.]+)\.//) { $database = $1; } print $fh qq{

Pages removed per table

Key values

  • $main_page[0] ($main_page[1]) Main table with removed pages on database $database
  • $total_page pages Total removed
$drawn_graphs{pagevacuums_graph}
$vacuum_info
Table Number of vacuums Index scans Tuples removed Pages removed
}; delete $drawn_graphs{pagevacuums_graph}; } sub print_lock_type { my $curdb = shift; my %locktype = (); my $total_count = 0; my $total_duration = 0; my $locktype_info = ''; my @main_locktype = ('unknown',0); foreach my $t (sort keys %{$lock_info{$curdb}}) { $locktype_info .= "$t" . &comma_numbers($lock_info{$curdb}{$t}{count}) . "" . &convert_time($lock_info{$curdb}{$t}{duration}) . "" . &convert_time($lock_info{$curdb}{$t}{duration} / ($lock_info{$curdb}{$t}{count} || 1)) . ""; $total_count += $lock_info{$curdb}{$t}{count}; $total_duration += $lock_info{$curdb}{$t}{duration}; if ($main_locktype[1] < $lock_info{$curdb}{$t}{count}) { $main_locktype[0] = $t; $main_locktype[1] = $lock_info{$curdb}{$t}{count}; } foreach my $o (sort keys %{$lock_info{$curdb}{$t}}) { next if (($o eq 'count') || ($o eq 'duration') || ($o eq 'chronos')); $locktype_info .= "$o" . &comma_numbers($lock_info{$curdb}{$t}{$o}{count}) . "" . &convert_time($lock_info{$curdb}{$t}{$o}{duration}) . "" . &convert_time($lock_info{$curdb}{$t}{$o}{duration} / $lock_info{$curdb}{$t}{$o}{count}) . "\n"; } } if ($total_count > 0) { $locktype_info .= "Total" . &comma_numbers($total_count) . "" . &convert_time($total_duration) . "" . &convert_time($total_duration / ($total_count || 1)) . ""; } else { $locktype_info = qq{$NODATA}; } if ($graph) { my @small = (); foreach my $d (sort keys %{$lock_info{$curdb}}) { if ((($lock_info{$curdb}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { $locktype{$d} = $lock_info{$curdb}{$d}{count} || 0; } else { $locktype{"Sum lock types < $pie_percentage_limit%"} += $lock_info{$curdb}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $locktype{$small[0]} = $locktype{"Sum types < $pie_percentage_limit%"}; delete $locktype{"Sum lock types < $pie_percentage_limit%"}; } } $drawn_graphs{lockbytype_graph} = &jqplot_piegraph($graphid++, 'graph_lockbytype', 'Type of locks', %locktype); $total_count = &comma_numbers($total_count); print $fh qq{

Locks

Locks by types

Key values

  • $main_locktype[0] Main Lock Type
  • $total_count locks Total
$drawn_graphs{lockbytype_graph}
$locktype_info
Type Object Count Total Duration Average Duration (s)
}; delete $drawn_graphs{lockbytype_graph}; } sub print_query_type { my $curdb = shift; my %data = (); my $total_queries = 0; my $total_select = 0; my $total_write = 0; foreach my $a (@SQL_ACTION) { $total_queries += $overall_stat{$curdb}{lc($a)}; if ($a eq 'SELECT') { $total_select += $overall_stat{$curdb}{lc($a)}; } elsif ($a ne 'OTHERS') { $total_write += $overall_stat{$curdb}{lc($a)}; } } my $total = $overall_stat{$curdb}{'queries_number'}; my $querytype_info = ''; foreach my $a (@SQL_ACTION) { $querytype_info .= "$a" . &comma_numbers($overall_stat{$curdb}{lc($a)}) . "" . sprintf("%0.2f", ($overall_stat{$curdb}{lc($a)} * 100) / ($total||1)) . "%"; } if (($total - $total_queries) > 0) { $querytype_info .= "OTHERS" . &comma_numbers($total - $total_queries) . "" . sprintf("%0.2f", (($total - $total_queries) * 100) / ($total||1)) . "%"; } $querytype_info = qq{$NODATA} if (!$total); if ($graph && $total) { foreach my $t (@SQL_ACTION) { if ((($overall_stat{$curdb}{lc($t)} * 100) / ($total||1)) > $pie_percentage_limit) { $data{$t} = $overall_stat{$curdb}{lc($t)} || 0; } else { $data{"Sum query types < $pie_percentage_limit%"} += $overall_stat{$curdb}{lc($t)} || 0; } } if (((($total - $total_queries) * 100) / ($total||1)) > $pie_percentage_limit) { $data{'Others'} = $total - $total_queries; } else { $data{"Sum query types < $pie_percentage_limit%"} += $total - $total_queries; } } $drawn_graphs{queriesbytype_graph} = &jqplot_piegraph($graphid++, 'graph_queriesbytype', 'Type of queries', %data); $total_select = &comma_numbers($total_select); $total_write = &comma_numbers($total_write); print $fh qq{

Queries

Queries by type

Key values

  • $total_select Total read queries
  • $total_write Total write queries
$drawn_graphs{queriesbytype_graph}
$querytype_info
Type Count Percentage
}; delete $drawn_graphs{queriesbytype_graph}; } sub print_query_per_database { my $curdb = shift; my %infos = (); my $total_count = 0; my $query_database_info = ''; my @main_database = ('unknown', 0); my @main_database_duration = ('unknown', 0); foreach my $d (sort keys %{$database_info{$curdb}}) { $query_database_info .= "$dTotal" . &comma_numbers($database_info{$curdb}{$d}{count}) . "" . &convert_time($database_info{$curdb}{$d}{duration}) . ""; $total_count += $database_info{$curdb}{$d}{count}; if ($main_database[1] < $database_info{$curdb}{$d}{count}) { $main_database[0] = $d; $main_database[1] = $database_info{$curdb}{$d}{count}; } if ($main_database_duration[1] < $database_info{$curdb}{$d}{duration}) { $main_database_duration[0] = $d; $main_database_duration[1] = $database_info{$curdb}{$d}{duration}; } foreach my $r (sort keys %{$database_info{$curdb}{$d}}) { next if (($r eq 'count') || ($r =~ /duration/)); $query_database_info .= "$r" . &comma_numbers($database_info{$curdb}{$d}{$r}) . "" . &convert_time($database_info{$curdb}{$d}{"$r|duration"}) . ""; } } $query_database_info = qq{$NODATA} if (!$total_count); if ($graph) { my @small = (); foreach my $d (sort keys %{$database_info{$curdb}}) { if ((($database_info{$curdb}{$d}{count} * 100) / ($total_count || 1)) > $pie_percentage_limit) { $infos{$d} = $database_info{$curdb}{$d}{count} || 0; } else { $infos{"Sum queries per databases < $pie_percentage_limit%"} += $database_info{$curdb}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum queries per databases < $pie_percentage_limit%"}; delete $infos{"Sum queries per databases < $pie_percentage_limit%"}; } } $drawn_graphs{queriesbydatabase_graph} = &jqplot_piegraph($graphid++, 'graph_queriesbydatabase', 'Queries per database', %infos); $main_database[1] = &comma_numbers($main_database[1]); $main_database_duration[1] = &convert_time($main_database_duration[1]); print $fh qq{

Queries by database

Key values

  • $main_database[0] Main database
  • $main_database[1] Requests
  • $main_database_duration[1] ($main_database_duration[0])
  • Main time consuming database
$drawn_graphs{queriesbydatabase_graph}
$query_database_info
Database Request type Count Duration
}; delete $drawn_graphs{queriesbydatabase_graph}; } sub print_query_per_application { my $curdb = shift; my %infos = (); my $total_count = 0; my $query_application_info = ''; my @main_application = ('unknown', 0); my @main_application_duration = ('unknown', 0); foreach my $d (sort keys %{$application_info{$curdb}}) { $query_application_info .= "$dTotal" . &comma_numbers($application_info{$curdb}{$d}{count}) . "" . &convert_time($application_info{$curdb}{$d}{duration}) . ""; $total_count += $application_info{$curdb}{$d}{count}; if ($main_application[1] < $application_info{$curdb}{$d}{count}) { $main_application[0] = $d; $main_application[1] = $application_info{$curdb}{$d}{count}; } if ($main_application_duration[1] < $application_info{$curdb}{$d}{duration}) { $main_application_duration[0] = $d; $main_application_duration[1] = $application_info{$curdb}{$d}{duration}; } foreach my $r (sort keys %{$application_info{$curdb}{$d}}) { next if (($r eq 'count') || ($r =~ /duration/)); $query_application_info .= "$r" . &comma_numbers($application_info{$curdb}{$d}{$r}) . "" . &convert_time($application_info{$curdb}{$d}{"$r|duration"}) . ""; } } $query_application_info = qq{$NODATA} if (!$total_count); if ($graph) { my @small = (); foreach my $d (sort keys %{$application_info{$curdb}}) { if ((($application_info{$curdb}{$d}{count} * 100) / ($total_count || 1)) > $pie_percentage_limit) { $infos{$d} = $application_info{$curdb}{$d}{count} || 0; } else { $infos{"Sum queries per applications < $pie_percentage_limit%"} += $application_info{$curdb}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum queries per applications < $pie_percentage_limit%"}; delete $infos{"Sum queries per applications < $pie_percentage_limit%"}; } } $drawn_graphs{queriesbyapplication_graph} = &jqplot_piegraph($graphid++, 'graph_queriesbyapplication', 'Queries per application', %infos); $main_application[1] = &comma_numbers($main_application[1]); $main_application_duration[1] = &convert_time($main_application_duration[1]); print $fh qq{

Queries by application

Key values

  • $main_application[0] Main application
  • $main_application[1] Requests
  • $main_application_duration[1] ($main_application_duration[0])
  • Main time consuming application
$drawn_graphs{queriesbyapplication_graph}
$query_application_info
Application Request type Count Duration
}; delete $drawn_graphs{queriesbyapplication_graph}; } sub print_query_per_user { my $curdb = shift; my %infos = (); my $total_count = 0; my $total_duration = 0; my $query_user_info = ''; my @main_user = ('unknown', 0); my @main_user_duration = ('unknown', 0); foreach my $d (sort keys %{$user_info{$curdb}}) { $query_user_info .= "$dTotal" . &comma_numbers($user_info{$curdb}{$d}{count}) . "" . &convert_time($user_info{$curdb}{$d}{duration}) . ""; $total_count += $user_info{$curdb}{$d}{count}; $total_duration += $user_info{$curdb}{$d}{duration}; if ($main_user[1] < $user_info{$curdb}{$d}{count}) { $main_user[0] = $d; $main_user[1] = $user_info{$curdb}{$d}{count}; } if ($main_user_duration[1] < $user_info{$curdb}{$d}{duration}) { $main_user_duration[0] = $d; $main_user_duration[1] = $user_info{$curdb}{$d}{duration}; } foreach my $r (sort keys %{$user_info{$curdb}{$d}}) { next if (($r eq 'count') || ($r =~ /duration/)); $query_user_info .= "$r" . &comma_numbers($user_info{$curdb}{$d}{$r}) . "" . &convert_time($user_info{$curdb}{$d}{"$r|duration"}) . ""; } } $query_user_info = qq{$NODATA} if (!$total_count); if ($graph) { my %small = (); foreach my $d (sort keys %{$user_info{$curdb}}) { if ((($user_info{$curdb}{$d}{count} * 100) / ($total_count || 1)) > $pie_percentage_limit) { $infos{queries}{$d} = $user_info{$curdb}{$d}{count} || 0; } else { $infos{queries}{"Sum queries per users < $pie_percentage_limit%"} += $user_info{$curdb}{$d}{count} || 0; push(@{$small{queries}}, $d); } if ((($user_info{$curdb}{$d}{duration} * 100) / ($total_duration || 1)) > $pie_percentage_limit) { $infos{duration}{$d} = $user_info{$curdb}{$d}{duration} || 0; } else { $infos{duration}{"Sum duration per users < $pie_percentage_limit%"} += $user_info{$curdb}{$d}{duration} || 0; push(@{$small{duration}}, $d); } } if ($#{$small{queries}} == 0) { $infos{queries}{$small{queries}[0]} = $infos{queries}{"Sum queries per users < $pie_percentage_limit%"}; delete $infos{queries}{"Sum queries per users < $pie_percentage_limit%"}; } if ($#{$small{duration}} == 0){ $infos{duration}{$small{duration}[0]} = $infos{duration}{"Sum duration per users < $pie_percentage_limit%"}; delete $infos{duration}{"Sum duration per users < $pie_percentage_limit%"}; } } $drawn_graphs{queriesbyuser_graph} = &jqplot_piegraph($graphid++, 'graph_queriesbyuser', 'Queries per user', %{$infos{queries}}); $drawn_graphs{durationbyuser_graph} = &jqplot_piegraph($graphid++, 'graph_durationbyuser', 'Duration per user', %{$infos{duration}}); $main_user[1] = &comma_numbers($main_user[1]); $main_user_duration[1] = &convert_time($main_user_duration[1]); print $fh qq{

Queries by user

Key values

  • $main_user[0] Main user
  • $main_user[1] Requests
$drawn_graphs{queriesbyuser_graph}
$query_user_info
User Request type Count Duration
}; delete $drawn_graphs{queriesbyuser_graph}; print $fh qq{

Duration by user

Key values

  • $main_user_duration[1] ($main_user_duration[0]) Main time consuming user
$drawn_graphs{durationbyuser_graph}
$query_user_info
User Request type Count Duration
}; delete $drawn_graphs{durationbyuser_graph}; } sub print_query_per_host { my $curdb = shift; my %infos = (); my $total_count = 0; my $query_host_info = ''; my @main_host = ('unknown', 0); my @main_host_duration = ('unknown', 0); foreach my $d (sort keys %{$host_info{$curdb}}) { $query_host_info .= "$dTotal" . &comma_numbers($host_info{$curdb}{$d}{count}) . "" . &convert_time($host_info{$curdb}{$d}{duration}) . ""; $total_count += $host_info{$curdb}{$d}{count}; if ($main_host[1] < $host_info{$curdb}{$d}{count}) { $main_host[0] = $d; $main_host[1] = $host_info{$curdb}{$d}{count}; } if ($main_host_duration[1] < $host_info{$curdb}{$d}{duration}) { $main_host_duration[0] = $d; $main_host_duration[1] = $host_info{$curdb}{$d}{duration}; } foreach my $r (sort keys %{$host_info{$curdb}{$d}}) { next if (($r eq 'count') || ($r =~ /duration/)); $query_host_info .= "$r" . &comma_numbers($host_info{$curdb}{$d}{$r}) . "" . &convert_time($host_info{$curdb}{$d}{"$r|duration"}) . ""; } } $query_host_info = qq{$NODATA} if (!$total_count); if ($graph) { my @small = (); foreach my $d (sort keys %{$host_info{$curdb}}) { if ((($host_info{$curdb}{$d}{count} * 100) / ($total_count || 1)) > $pie_percentage_limit) { $infos{$d} = $host_info{$curdb}{$d}{count} || 0; } else { $infos{"Sum queries per hosts < $pie_percentage_limit%"} += $host_info{$curdb}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum queries per hosts < $pie_percentage_limit%"}; delete $infos{"Sum queries per hosts < $pie_percentage_limit%"}; } } $drawn_graphs{queriesbyhost_graph} = &jqplot_piegraph($graphid++, 'graph_queriesbyhost', 'Queries per host', %infos); $main_host[1] = &comma_numbers($main_host[1]); $main_host_duration[1] = &convert_time($main_host_duration[1]); print $fh qq{

Queries by host

Key values

  • $main_host[0] Main host
  • $main_host[1] Requests
  • $main_host_duration[1] ($main_host_duration[0])
  • Main time consuming host
$drawn_graphs{queriesbyhost_graph}
$query_host_info
Host Request type Count Duration
}; delete $drawn_graphs{queriesbyhost_graph}; } sub display_plan { my ($id, $plan) = @_; my $url = $EXPLAIN_URL . url_escape($plan); # Only TEXT format plan can be sent to Depesz site. if ($plan !~ /Node Type:|"Node Type":|Node-Type/s) { return "
Explain plan
\n
\n
" . $plan . "
\n
\n"; } else { return "
Explain plan
\n
\n
" . $plan . "
\n
\n"; } } sub print_lock_queries_report { my $curdb = shift; my @top_locked_queries = (); foreach my $h (keys %{$normalyzed_info{$curdb}}) { if (exists($normalyzed_info{$curdb}{$h}{locks})) { push (@top_locked_queries, [$h, $normalyzed_info{$curdb}{$h}{locks}{count}, $normalyzed_info{$curdb}{$h}{locks}{wait}, $normalyzed_info{$curdb}{$h}{locks}{minwait}, $normalyzed_info{$curdb}{$h}{locks}{maxwait}]); } } # Most frequent waiting queries (N) @top_locked_queries = sort {$b->[2] <=> $a->[2]} @top_locked_queries; print $fh qq{

Most frequent waiting queries (N)

}; my $rank = 1; for (my $i = 0 ; $i <= $#top_locked_queries ; $i++) { my $count = &comma_numbers($top_locked_queries[$i]->[1]); my $total_time = &convert_time($top_locked_queries[$i]->[2]); my $min_time = &convert_time($top_locked_queries[$i]->[3]); my $max_time = &convert_time($top_locked_queries[$i]->[4]); my $avg_time = &convert_time($top_locked_queries[$i]->[2] / ($top_locked_queries[$i]->[1] || 1)); my $query = &highlight_code(&anonymize_query($top_locked_queries[$i]->[0])); my $md5 = ''; $md5 = 'md5: ' . md5_hex($top_locked_queries[$i]->[0]) if ($enable_checksum); my $example = qq{

}; my $k = $top_locked_queries[$i]->[0]; $example = '' if (scalar keys %{$normalyzed_info{$curdb}{$k}{samples}} == 0); print $fh qq{ }; } $rank++; } if ($#top_locked_queries == -1) { print $fh qq{}; } print $fh qq{
Rank Count Total time Min time Max time Avg duration Query
$rank $count $total_time $min_time $max_time $avg_time
$query
$md5 $example
}; if (scalar keys %{$normalyzed_info{$curdb}{$k}{samples}}) { my $idx = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$curdb}{$k}{samples}}) { last if ($idx > $sample); $query = &highlight_code(&anonymize_query($normalyzed_info{$curdb}{$k}{samples}{$d}{query})); $md5 = ''; $md5 = 'md5: ' . md5_hex($normalyzed_info{$curdb}{$k}{samples}{$d}{query}) if ($enable_checksum); my $details = "Date: $normalyzed_info{$curdb}{$k}{samples}{$d}{date}\n"; $details .= "Duration: " . &convert_time($d) . "\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{duration}); $details .= "Database: $normalyzed_info{$curdb}{$k}{samples}{$d}{db}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{db}); $details .= "User: $normalyzed_info{$curdb}{$k}{samples}{$d}{user}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{user}); $details .= "Remote: $normalyzed_info{$curdb}{$k}{samples}{$d}{remote}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{remote}); $details .= "Application: $normalyzed_info{$curdb}{$k}{samples}{$d}{app}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{app}); $details .= "Bind query: yes\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{bind}); my $explain = ''; if ($normalyzed_info{$curdb}{$k}{samples}{$d}{plan}) { $explain = &display_plan("query-a-explain-$rank", $normalyzed_info{$curdb}{$k}{samples}{$d}{plan}); } print $fh qq{
$query
$md5
$details
$explain
}; $idx++; } print $fh qq{

$NODATA
}; @top_locked_queries = (); # Queries that waited the most @{$top_locked_info{$curdb}} = sort {$b->[1] <=> $a->[1]} @{$top_locked_info{$curdb}}; print $fh qq{

Queries that waited the most

}; $rank = 1; for (my $i = 0 ; $i <= $#{$top_locked_info{$curdb}} ; $i++) { my $query = &highlight_code(&anonymize_query($top_locked_info{$curdb}[$i]->[2])); my $md5 = ''; $md5 = 'md5: ' . md5_hex($top_locked_info{$curdb}[$i]->[2]) if ($enable_checksum); my $details = "[ Date: " . ($top_locked_info{$curdb}[$i]->[1] || ''); $details .= " - Database: $top_locked_info{$curdb}[$i]->[3]" if ($top_locked_info{$curdb}[$i]->[3]); $details .= " - User: $top_locked_info{$curdb}[$i]->[4]" if ($top_locked_info{$curdb}[$i]->[4]); $details .= " - Remote: $top_locked_info{$curdb}[$i]->[5]" if ($top_locked_info{$curdb}[$i]->[5]); $details .= " - Application: $top_locked_info{$curdb}[$i]->[6]" if ($top_locked_info{$curdb}[$i]->[6]); $details .= " - Bind query: yes" if ($top_locked_info{$curdb}[$i]->[7]); $details .= " ]"; my $time = &convert_time($top_locked_info{$curdb}[$i]->[0]); print $fh qq{ }; $rank++; } if ($#{$top_locked_info{$curdb}} == -1) { print $fh qq{}; } print $fh qq{
Rank Wait time Query
$rank $time
$query
$md5
$details
$NODATA
}; } sub print_tempfile_report { my $curdb = shift; my @top_temporary = (); foreach my $h (keys %{$normalyzed_info{$curdb}}) { if (exists($normalyzed_info{$curdb}{$h}{tempfiles})) { push (@top_temporary, [$h, $normalyzed_info{$curdb}{$h}{tempfiles}{count}, $normalyzed_info{$curdb}{$h}{tempfiles}{size}, $normalyzed_info{$curdb}{$h}{tempfiles}{minsize}, $normalyzed_info{$curdb}{$h}{tempfiles}{maxsize}]); } } # Queries generating the most temporary files (N) if ($#top_temporary >= 0) { @top_temporary = sort { $b->[1] <=> $a->[1] } @top_temporary; print $fh qq{

Queries generating the most temporary files (N)

}; my $rank = 1; for (my $i = 0 ; $i <= $#top_temporary ; $i++) { my $count = &comma_numbers($top_temporary[$i]->[1]); my $total_size = &pretty_print_size($top_temporary[$i]->[2]); my $min_size = &pretty_print_size($top_temporary[$i]->[3]); my $max_size = &pretty_print_size($top_temporary[$i]->[4]); my $avg_size = &pretty_print_size($top_temporary[$i]->[2] / ($top_temporary[$i]->[1] || 1)); my $query = &highlight_code(&anonymize_query($top_temporary[$i]->[0])); my $md5 = ''; $md5 = 'md5: ' . md5_hex($top_temporary[$i]->[0]) if ($enable_checksum); my $example = qq{

}; my $k = $top_temporary[$i]->[0]; $example = '' if (scalar keys %{$normalyzed_info{$curdb}{$k}{samples}} == 0); print $fh qq{ }; } $rank++; } print $fh qq{
Rank Count Total size Min size Max size Avg size Query
$rank $count $total_size $min_size $max_size $avg_size
$query
$md5 $example
}; if (scalar keys %{$normalyzed_info{$curdb}{$k}{samples}}) { my $idx = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$curdb}{$k}{samples}}) { last if ($idx > $sample); $query = &highlight_code(&anonymize_query($normalyzed_info{$curdb}{$k}{samples}{$d}{query})); my $md5 = ''; $md5 = 'md5: ' . md5_hex($normalyzed_info{$curdb}{$k}{samples}{$d}{query}) if ($enable_checksum); my $details = "Date: " . $normalyzed_info{$curdb}{$k}{samples}{$d}{date} . "\n"; $details .= "Duration: " . &convert_time($d) . "\n"; $details .= "Database: $normalyzed_info{$curdb}{$k}{samples}{$d}{db}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{db}); $details .= "User: $normalyzed_info{$curdb}{$k}{samples}{$d}{user}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{user}); $details .= "Remote: $normalyzed_info{$curdb}{$k}{samples}{$d}{remote}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{remote}); $details .= "Application: $normalyzed_info{$curdb}{$k}{samples}{$d}{app}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{app}); if (exists $top_tempfile_info{$curdb} && $#{$top_tempfile_info{$curdb}} >= $i) { $details .= "Info: $top_tempfile_info{$curdb}[$i]->[7]" if ($top_tempfile_info{$curdb}[$i]->[7]); } print $fh qq{
$query
$md5
$details
}; $idx++ } print $fh qq{

}; @top_temporary = (); } # Top queries generating the largest temporary files if ($#{$top_tempfile_info{$curdb}} >= 0) { @{$top_tempfile_info{$curdb}} = sort {$b->[0] <=> $a->[0]} @{$top_tempfile_info{$curdb}}; my $largest = &comma_numbers($top_temporary[0]->[0]); print $fh qq{

Queries generating the largest temporary files

}; my $rank = 1; for (my $i = 0 ; $i <= $#{$top_tempfile_info{$curdb}} ; $i++) { my $size = &pretty_print_size($top_tempfile_info{$curdb}[$i]->[0]); my $details = "[ Date: $top_tempfile_info{$curdb}[$i]->[1]"; $details .= " - Database: $top_tempfile_info{$curdb}[$i]->[3]" if ($top_tempfile_info{$curdb}[$i]->[3]); $details .= " - User: $top_tempfile_info{$curdb}[$i]->[4]" if ($top_tempfile_info{$curdb}[$i]->[4]); $details .= " - Remote: $top_tempfile_info{$curdb}[$i]->[5]" if ($top_tempfile_info{$curdb}[$i]->[5]); $details .= " - Application: $top_tempfile_info{$curdb}[$i]->[6]" if ($top_tempfile_info{$curdb}[$i]->[6]); $details .= " ]"; $details .= "\nInfo: $top_tempfile_info{$curdb}[$i]->[7]" if ($top_tempfile_info{$curdb}[$i]->[7]); my $query = &highlight_code(&anonymize_query($top_tempfile_info{$curdb}[$i]->[2])); my $md5 = ''; $md5 = 'md5: ' . md5_hex($top_tempfile_info{$curdb}[$i]->[2]) if ($enable_checksum); print $fh qq{ }; $rank++; } print $fh qq{
Rank Size Query
$rank $size
$query
$md5
$details
}; @{$top_tempfile_info{$curdb}} = (); } } sub print_cancelled_report { my $curdb = shift(); my @top_cancelled = (); foreach my $h (keys %{$normalyzed_info{$curdb}}) { if (exists($normalyzed_info{$curdb}{$h}{cancelled})) { push (@top_cancelled, [$h, $normalyzed_info{$curdb}{$h}{cancelled}{count}]); } } # Queries generating the most cancellation (N) if ($#top_cancelled >= 0) { @top_cancelled = sort {$b->[1] <=> $a->[1]} @top_cancelled; print $fh qq{

Queries generating the most cancellation (N)

}; my $rank = 1; for (my $i = 0 ; $i <= $#top_cancelled ; $i++) { my $count = &comma_numbers($top_cancelled[$i]->[1]); my $query = &highlight_code($top_cancelled[$i]->[0]); my $md5 = ''; $md5 = 'md5: ' . md5_hex($top_cancelled[$i]->[0]) if ($enable_checksum); my $example = qq{

}; my $k = $top_cancelled[$i]->[0]; $example = '' if (scalar keys %{$normalyzed_info{$curdb}{$k}{samples}} == 0); print $fh qq{ }; } $rank++; } print $fh qq{
Rank Count Query
$rank $count
$query
$md5 $example
}; if (scalar keys %{$normalyzed_info{$curdb}{$k}{samples}}) { my $idx = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$curdb}{$k}{samples}}) { last if ($idx > $sample); $query = &highlight_code($normalyzed_info{$curdb}{$k}{samples}{$d}{query}); my $md5 = ''; $md5 = 'md5: ' . md5_hex($normalyzed_info{$curdb}{$k}{samples}{$d}{query}) if ($enable_checksum); my $details = "Duration: " . &convert_time($d) . "
"; $details .= "Database: $normalyzed_info{$curdb}{$k}{samples}{$d}{db}
" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{db}); $details .= "User: $normalyzed_info{$curdb}{$k}{samples}{$d}{user}
" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{user}); $details .= "Remote: $normalyzed_info{$curdb}{$k}{samples}{$d}{remote}
" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{remote}); $details .= "Application: $normalyzed_info{$curdb}{$k}{samples}{$d}{app}
" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{app}); $details .= "Bind query: yes
" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{bind}); print $fh qq{
$query
$md5
$details
}; $idx++ } print $fh qq{

}; @top_cancelled = (); } # Top queries generating the most cancellation if ($#{$top_cancelled_info{$curdb}} >= 0) { @{$top_cancelled_info{$curdb}} = sort {$b->[0] <=> $a->[0]} @{$top_cancelled_info{$curdb}}; my $largest = &comma_numbers($top_cancelled_info{$curdb}[0]->[0]); print $fh qq{

Queries most cancelled

}; my $rank = 1; for (my $i = 0 ; $i <= $#{$top_cancelled_info{$curdb}} ; $i++) { my $count = &comma_numbers($top_cancelled_info{$curdb}[$i]->[0]); my $details = "[ Date: $top_cancelled_info{$curdb}[$i]->[1]"; $details .= " - Database: $top_cancelled_info{$curdb}[$i]->[3]" if ($top_cancelled_info{$curdb}[$i]->[3]); $details .= " - User: $top_cancelled_info{$curdb}[$i]->[4]" if ($top_cancelled_info{$curdb}[$i]->[4]); $details .= " - Remote: $top_cancelled_info{$curdb}[$i]->[5]" if ($top_cancelled_info{$curdb}[$i]->[5]); $details .= " - Application: $top_cancelled_info{$curdb}[$i]->[6]" if ($top_cancelled_info{$curdb}[$i]->[6]); $details .= " - Bind yes: yes" if ($top_cancelled_info{$curdb}[$i]->[7]); $details .= " ]"; my $query = &highlight_code(&anonymize_query($top_cancelled_info{$curdb}[$i]->[2])); my $md5 = ''; $md5 = 'md5: ' . md5_hex($top_cancelled_info{$curdb}[$i]->[2]) if ($enable_checksum); print $fh qq{ }; $rank++; } print $fh qq{
Rank Number Query
$rank $count
$query
$md5
$details
}; @{$top_cancelled_info{$curdb}} = (); } } sub print_histogram_query_times { my $curdb = shift; my %data = (); my $histogram_info = ''; my $most_range = ''; my $most_range_value = ''; for (my $i = 1; $i <= $#histogram_query_time; $i++) { $histogram_info .= "$histogram_query_time[$i-1]-$histogram_query_time[$i]ms" . &comma_numbers($overall_stat{$curdb}{histogram}{query_time}{$histogram_query_time[$i-1]}) . "" . sprintf("%0.2f", ($overall_stat{$curdb}{histogram}{query_time}{$histogram_query_time[$i-1]} * 100) / ($overall_stat{$curdb}{histogram}{query_total}||1)) . "%"; $data{"$histogram_query_time[$i-1]-$histogram_query_time[$i]ms"} = ($overall_stat{$curdb}{histogram}{query_time}{$histogram_query_time[$i-1]} || 0); if ($overall_stat{$curdb}{histogram}{query_time}{$histogram_query_time[$i-1]} > $most_range_value) { $most_range = "$histogram_query_time[$i-1]-$histogram_query_time[$i]ms"; $most_range_value = $overall_stat{$curdb}{histogram}{query_time}{$histogram_query_time[$i-1]}; } } if ($overall_stat{$curdb}{histogram}{query_total} > 0) { $data{"> $histogram_query_time[-1]ms"} = ($overall_stat{$curdb}{histogram}{query_time}{"-1"} || 0); $histogram_info .= " > $histogram_query_time[-1]ms" . &comma_numbers($overall_stat{$curdb}{histogram}{query_time}{'-1'}) . "" . sprintf("%0.2f", ($overall_stat{$curdb}{histogram}{query_time}{'-1'} * 100) / ($overall_stat{$curdb}{histogram}{query_total}||1)) . "%"; $data{"> $histogram_query_time[-1]ms"} = $overall_stat{$curdb}{histogram}{query_time}{"-1"} if ($overall_stat{$curdb}{histogram}{query_time}{"-1"} > 0); if ($overall_stat{$curdb}{histogram}{query_time}{"-1"} > $most_range_value) { $most_range = "> $histogram_query_time[-1]ms"; $most_range_value = $overall_stat{$curdb}{histogram}{query_time}{"-1"}; } } else { $histogram_info = qq{$NODATA}; } $drawn_graphs{histogram_query_times_graph} = $NODATA; if ($graph) { $drawn_graphs{histogram_query_times_graph} = &jqplot_duration_histograph($graphid++, 'graph_histogram_query_times', 'Queries', \@histogram_query_time, %data); } $most_range_value = &comma_numbers($most_range_value) if ($most_range_value); print $fh qq{

Top Queries

Histogram of query times

Key values

  • $most_range_value $most_range duration
$drawn_graphs{histogram_query_times_graph}
$histogram_info
Range Count Percentage
}; delete $drawn_graphs{histogram_query_times_graph}; } sub print_slowest_individual_queries { my $curdb = shift; print $fh qq{

Slowest individual queries

}; my $idx = 1; for (my $i = 0 ; $i <= $#{$top_slowest{$curdb}} ; $i++) { my $rank = $i + 1; my $duration = &convert_time($top_slowest{$curdb}[$i]->[0]); my $date = $top_slowest{$curdb}[$i]->[1] || ''; my $details = "[ Date: " . ($top_slowest{$curdb}[$i]->[1] || ''); $details .= " - Database: $top_slowest{$curdb}[$i]->[3]" if ($top_slowest{$curdb}[$i]->[3]); $details .= " - User: $top_slowest{$curdb}[$i]->[4]" if ($top_slowest{$curdb}[$i]->[4]); $details .= " - Remote: $top_slowest{$curdb}[$i]->[5]" if ($top_slowest{$curdb}[$i]->[5]); $details .= " - Application: $top_slowest{$curdb}[$i]->[6]" if ($top_slowest{$curdb}[$i]->[6]); $details .= " - Bind query: yes" if ($top_slowest{$curdb}[$i]->[7]); $details .= " ]"; my $explain = ''; if ($top_slowest{$curdb}[$i]->[8]) { $explain = &display_plan("query-d-explain-$rank-$idx", $top_slowest{$curdb}[$i]->[8]); } my $query = &highlight_code(&anonymize_query($top_slowest{$curdb}[$i]->[2])); my $md5 = ''; $md5 = 'md5: ' . md5_hex($top_slowest{$curdb}[$i]->[2]) if ($enable_checksum); print $fh qq{ }; $idx++; } if ($#{$top_slowest{$curdb}} == -1) { print $fh qq{}; } print $fh qq{
Rank Duration Query
$rank $duration
$query
$md5
$details
$explain
$NODATA
}; } sub print_time_consuming { my $curdb = shift; print $fh qq{

Time consuming queries (N)

}; my $rank = 1; my $found = 0; foreach my $k (sort {$normalyzed_info{$curdb}{$b}{duration} <=> $normalyzed_info{$curdb}{$a}{duration}} keys %{$normalyzed_info{$curdb}}) { next if (!$normalyzed_info{$curdb}{$k}{count} || !exists $normalyzed_info{$curdb}{$k}{duration}); last if ($rank > $top); $found++; $normalyzed_info{$curdb}{$k}{average} = $normalyzed_info{$curdb}{$k}{duration} / $normalyzed_info{$curdb}{$k}{count}; my $duration = &convert_time($normalyzed_info{$curdb}{$k}{duration}); my $count = &comma_numbers($normalyzed_info{$curdb}{$k}{count}); my $min = &convert_time($normalyzed_info{$curdb}{$k}{min}); my $max = &convert_time($normalyzed_info{$curdb}{$k}{max}); my $avg = &convert_time($normalyzed_info{$curdb}{$k}{average}); my $query = &highlight_code($k); my $md5 = ''; $md5 = 'md5: ' . md5_hex($k) if ($enable_checksum); my $details = ''; my %hourly_count = (); my %hourly_duration = (); my $days = 0; foreach my $d (sort keys %{$normalyzed_info{$curdb}{$k}{chronos}}) { $d =~ /^(\d{4})(\d{2})(\d{2})$/; $days++; my $zday = "$abbr_month{$2} $3"; my $dd = $3; my $mo = $2 -1 ; my $y = $1 - 1900; foreach my $h (sort keys %{$normalyzed_info{$curdb}{$k}{chronos}{$d}}) { my $t = timegm_nocheck(0, 0, $h, $dd, $mo, $y); $t += $timezone; my $ht = sprintf("%02d", (localtime($t))[2]); $normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{average} = $normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{duration} / ($normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{count} || 1); $details .= ""; $zday = ""; foreach my $m (sort keys %{$normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{min}}) { my $rd = &average_per_minutes($m, $histo_avg_minutes); $hourly_count{"$ht:$rd"} += $normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{min}{$m}; $hourly_duration{"$ht:$rd"} += ($normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{min_duration}{$m} || 0); } if ($#histo_avgs > 0) { foreach my $rd (@histo_avgs) { next if (!exists $hourly_count{"$ht:$rd"}); $details .= ""; } } } } # Set graph dataset my %graph_data = (); foreach my $h ("00" .. "23") { foreach my $rd (@histo_avgs) { $graph_data{count} .= "['$h:$rd'," . ($hourly_count{"$h:$rd"} || 0) . "],"; $graph_data{duration} .= "['$h:$rd'," . (int($hourly_duration{"$h:$rd"} / ($hourly_count{"$h:$rd"} || 1)) || 0) . "],"; } } $graph_data{count} =~ s/,$//; $graph_data{duration} =~ s/,$//; %hourly_count = (); %hourly_duration = (); my $users_involved = ''; if (scalar keys %{$normalyzed_info{$curdb}{$k}{users}} > 0) { $users_involved = qq{}; } my $apps_involved = ''; if (scalar keys %{$normalyzed_info{$curdb}{$k}{apps}} > 0) { $apps_involved = qq{}; } my $query_histo = ''; if ($graph) { $query_histo = &jqplot_histograph($graphid++, 'time_consuming_queries_details_'.$rank, $graph_data{count}, $graph_data{duration}, 'Queries', 'Avg. duration'); } print $fh qq{ }; $rank++; } if (!$found) { print $fh qq{}; } print $fh qq{
Rank Total duration Times executed Min duration Max duration Avg duration Query
$zday$ht" . &comma_numbers($normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{count}) . "" . &convert_time($normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{duration}) . "" . &convert_time($normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{average}) . "
$zday$ht:$rd" . &comma_numbers($hourly_count{"$ht:$rd"}) . "" . &convert_time($hourly_duration{"$ht:$rd"}) . "" . &convert_time($hourly_duration{"$ht:$rd"}/($hourly_count{"$ht:$rd"}||1)) . "
$rank $duration $count

Details

$min $max $avg
$query
$md5

Times Reported Time consuming queries #$rank

$query_histo $details
Day Hour Count Duration Avg duration

$users_involved $apps_involved

}; if (scalar keys %{$normalyzed_info{$curdb}{$k}{users}} > 0) { print $fh qq{
}; my $idx = 1; foreach my $u (sort {$normalyzed_info{$curdb}{$k}{users}{$b}{duration} <=> $normalyzed_info{$curdb}{$k}{users}{$a}{duration}} keys %{$normalyzed_info{$curdb}{$k}{users}}) { if ($normalyzed_info{$curdb}{$k}{users}{$u}{duration} > 0) { my $details = "[ User: $u"; $details .= " - Total duration: ".&convert_time($normalyzed_info{$curdb}{$k}{users}{$u}{duration}); $details .= " - Times executed: $normalyzed_info{$curdb}{$k}{users}{$u}{count}"; $details .= " ]\n"; print $fh qq{
$details
}; $idx++; } } print $fh qq{

}; } if (scalar keys %{$normalyzed_info{$curdb}{$k}{apps}} > 0) { print $fh qq{
}; my $idx = 1; foreach my $u (sort {$normalyzed_info{$curdb}{$k}{apps}{$b}{duration} <=> $normalyzed_info{$curdb}{$k}{apps}{$a}{duration}} keys %{$normalyzed_info{$curdb}{$k}{apps}}) { if ($normalyzed_info{$curdb}{$k}{apps}{$u}{duration} > 0) { my $details = "[ Application: $u"; $details .= " - Total duration: ".&convert_time($normalyzed_info{$curdb}{$k}{apps}{$u}{duration}); $details .= " - Times executed: $normalyzed_info{$curdb}{$k}{apps}{$u}{count}"; $details .= " ]\n"; print $fh qq{
$details
}; $idx++; } } print $fh qq{

}; } print $fh qq{
}; my $idx = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$curdb}{$k}{samples}}) { last if ($idx > $sample); my $details = "Date: $normalyzed_info{$curdb}{$k}{samples}{$d}{date}\n"; $details .= "Duration: " . &convert_time($d) . "\n"; $details .= "Database: $normalyzed_info{$curdb}{$k}{samples}{$d}{db}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{db}); $details .= "User: $normalyzed_info{$curdb}{$k}{samples}{$d}{user}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{user}); $details .= "Remote: $normalyzed_info{$curdb}{$k}{samples}{$d}{remote}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{remote}); $details .= "Application: $normalyzed_info{$curdb}{$k}{samples}{$d}{app}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{app}); $details .= "Bind query: yes\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{bind}); my $explain = ''; if ($normalyzed_info{$curdb}{$k}{samples}{$d}{plan}) { $explain = &display_plan("query-e-explain-$rank-$idx", $normalyzed_info{$curdb}{$k}{samples}{$d}{plan}); } $query = &highlight_code(&anonymize_query($normalyzed_info{$curdb}{$k}{samples}{$d}{query})); my $md5 = ''; $md5 = 'md5: ' . md5_hex($normalyzed_info{$curdb}{$k}{samples}{$d}{query}) if ($enable_checksum); print $fh qq{
$query
$md5
$details
$explain
}; $idx++; } print $fh qq{

$NODATA
}; } sub print_most_frequent { my $curdb = shift; print $fh qq{

Most frequent queries (N)

}; my $rank = 1; foreach my $k (sort { $normalyzed_info{$curdb}{$b}{count} <=> $normalyzed_info{$curdb}{$a}{count} or $normalyzed_info{$curdb}{$b}{duration} <=> $normalyzed_info{$curdb}{$a}{duration} } keys %{$normalyzed_info{$curdb}}) { next if (!$normalyzed_info{$curdb}{$k}{count}); last if ($rank > $top); $normalyzed_info{$curdb}{$k}{average} = $normalyzed_info{$curdb}{$k}{duration} / $normalyzed_info{$curdb}{$k}{count}; my $duration = &convert_time($normalyzed_info{$curdb}{$k}{duration}); my $count = &comma_numbers($normalyzed_info{$curdb}{$k}{count}); my $min = &convert_time($normalyzed_info{$curdb}{$k}{min}); my $max = &convert_time($normalyzed_info{$curdb}{$k}{max}); my $avg = &convert_time($normalyzed_info{$curdb}{$k}{average}); my $query = &highlight_code($k); my $md5 = ''; $md5 = 'md5: ' . md5_hex($k) if ($enable_checksum); my %hourly_count = (); my %hourly_duration = (); my $days = 0; my $details = ''; foreach my $d (sort keys %{$normalyzed_info{$curdb}{$k}{chronos}}) { $d =~ /^(\d{4})(\d{2})(\d{2})$/; $days++; my $zday = "$abbr_month{$2} $3"; my $dd = $3; my $mo = $2 - 1; my $y = $1 - 1900; foreach my $h (sort keys %{$normalyzed_info{$curdb}{$k}{chronos}{$d}}) { my $t = timegm_nocheck(0, 0, $h, $dd, $mo, $y); $t += $timezone; my $ht = sprintf("%02d", (localtime($t))[2]); $normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{average} = $normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{duration} / $normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{count}; $details .= ""; $zday = ""; foreach my $m (sort keys %{$normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{min}}) { my $rd = &average_per_minutes($m, $histo_avg_minutes); $hourly_count{"$ht:$rd"} += $normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{min}{$m}; $hourly_duration{"$ht:$rd"} += ($normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{min_duration}{$m} || 0); } if ($#histo_avgs > 0) { foreach my $rd (@histo_avgs) { next if (!exists $hourly_count{"$ht:$rd"}); $details .= ""; } } } } # Set graph dataset my %graph_data = (); foreach my $h ("00" .. "23") { foreach my $rd (@histo_avgs) { $graph_data{count} .= "['$h:$rd'," . ($hourly_count{"$h:$rd"} || 0) . "],"; $graph_data{duration} .= "['$h:$rd'," . (int($hourly_duration{"$h:$rd"} / ($hourly_count{"$h:$rd"} || 1)) || 0) . "],"; } } $graph_data{count} =~ s/,$//; $graph_data{duration} =~ s/,$//; %hourly_count = (); %hourly_duration = (); my $query_histo = ''; if ($graph) { $query_histo = &jqplot_histograph($graphid++, 'most_frequent_queries_details_'.$rank, $graph_data{count}, $graph_data{duration}, 'Queries', 'Avg. duration'); } my $users_involved = ''; if (scalar keys %{$normalyzed_info{$curdb}{$k}{users}} > 0) { $users_involved = qq{}; } my $apps_involved = ''; if (scalar keys %{$normalyzed_info{$curdb}{$k}{apps}} > 0) { $apps_involved = qq{}; } print $fh qq{ }; $rank++; } if (scalar keys %{$normalyzed_info{$curdb}} == 0) { print $fh qq{}; } print $fh qq{
Rank Times executed Total duration Min duration Max duration Avg duration Query
$zday$ht" . &comma_numbers($normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{count}) . "" . &convert_time($normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{duration}) . "" . &convert_time($normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{average}) . "
$zday$ht:$rd" . &comma_numbers($hourly_count{"$ht:$rd"}) . "" . &convert_time($hourly_duration{"$ht:$rd"}) . "" . &convert_time($hourly_duration{"$ht:$rd"}/($hourly_count{"$ht:$rd"}||1)) . "
$rank $count

Details

$duration $min $max $avg
$query
$md5

Times Reported Time consuming queries #$rank

$query_histo $details
Day Hour Count Duration Avg duration

$users_involved $apps_involved

}; if (scalar keys %{$normalyzed_info{$curdb}{$k}{users}} > 0) { print $fh qq{
}; my $idx = 1; foreach my $u (sort {$normalyzed_info{$curdb}{$k}{users}{$b}{duration} <=> $normalyzed_info{$curdb}{$k}{users}{$a}{duration}} keys %{$normalyzed_info{$curdb}{$k}{users}}) { if ($normalyzed_info{$curdb}{$k}{users}{$u}{duration} > 0) { my $details = "[ User: $u"; $details .= " - Total duration: ".&convert_time($normalyzed_info{$curdb}{$k}{users}{$u}{duration}); $details .= " - Times executed: $normalyzed_info{$curdb}{$k}{users}{$u}{count}"; $details .= " ]\n"; print $fh qq{
$details
}; $idx++; } } print $fh qq{

}; } if (scalar keys %{$normalyzed_info{$curdb}{$k}{apps}} > 0) { print $fh qq{
}; my $idx = 1; foreach my $u (sort {$normalyzed_info{$curdb}{$k}{apps}{$b}{duration} <=> $normalyzed_info{$curdb}{$k}{apps}{$a}{duration}} keys %{$normalyzed_info{$curdb}{$k}{apps}}) { if ($normalyzed_info{$curdb}{$k}{apps}{$u}{duration} > 0) { my $details = "[ Application: $u"; $details .= " - Total duration: ".&convert_time($normalyzed_info{$curdb}{$k}{apps}{$u}{duration}); $details .= " - Times executed: $normalyzed_info{$curdb}{$k}{apps}{$u}{count}"; $details .= " ]\n"; print $fh qq{
$details
}; $idx++; } } print $fh qq{

}; } print $fh qq{
}; my $idx = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$curdb}{$k}{samples}}) { last if ($idx > $sample); my $details = "Date: $normalyzed_info{$curdb}{$k}{samples}{$d}{date}\n"; $details .= "Duration: " . &convert_time($d) . "\n"; $details .= "Database: $normalyzed_info{$curdb}{$k}{samples}{$d}{db}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{db}); $details .= "User: $normalyzed_info{$curdb}{$k}{samples}{$d}{user}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{user}); $details .= "Remote: $normalyzed_info{$curdb}{$k}{samples}{$d}{remote}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{remote}); $details .= "Application: $normalyzed_info{$curdb}{$k}{samples}{$d}{app}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{app}); $details .= "Bind query: yes\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{bind}); my $explain = ''; if ($normalyzed_info{$curdb}{$k}{samples}{$d}{plan}) { $explain = &display_plan("query-f-explain-$rank-$idx", $normalyzed_info{$curdb}{$k}{samples}{$d}{plan}); } $query = &highlight_code(&anonymize_query($normalyzed_info{$curdb}{$k}{samples}{$d}{query})); my $md5 = ''; $md5 = 'md5: ' . md5_hex($normalyzed_info{$curdb}{$k}{samples}{$d}{query}) if ($enable_checksum); print $fh qq{
$query
$md5
$details
$explain
}; $idx++; } print $fh qq{

$NODATA
}; } sub print_slowest_queries { my $curdb = shift; print $fh qq{

Normalized slowest queries (N)

}; my $rank = 1; my $found = 0; foreach my $k (sort {$normalyzed_info{$curdb}{$b}{average} <=> $normalyzed_info{$curdb}{$a}{average}} keys %{$normalyzed_info{$curdb}}) { next if (!$k || !$normalyzed_info{$curdb}{$k}{count} || !exists $normalyzed_info{$curdb}{$k}{duration}); last if ($rank > $top); $found++; $normalyzed_info{$curdb}{$k}{average} = $normalyzed_info{$curdb}{$k}{duration} / $normalyzed_info{$curdb}{$k}{count}; my $duration = &convert_time($normalyzed_info{$curdb}{$k}{duration}); my $count = &comma_numbers($normalyzed_info{$curdb}{$k}{count}); my $min = &convert_time($normalyzed_info{$curdb}{$k}{min}); my $max = &convert_time($normalyzed_info{$curdb}{$k}{max}); my $avg = &convert_time($normalyzed_info{$curdb}{$k}{average}); my $query = &highlight_code($k); my $md5 = ''; $md5 = 'md5: ' . md5_hex($k) if ($enable_checksum); my $details = ''; my %hourly_count = (); my %hourly_duration = (); my $days = 0; foreach my $d (sort keys %{$normalyzed_info{$curdb}{$k}{chronos}}) { my $c = 1; $d =~ /^(\d{4})(\d{2})(\d{2})$/; $days++; my $zday = "$abbr_month{$2} $3"; my $dd = $3; my $mo = $2 - 1; my $y = $1 - 1900; foreach my $h (sort keys %{$normalyzed_info{$curdb}{$k}{chronos}{$d}}) { my $t = timegm_nocheck(0, 0, $h, $dd, $mo, $y); $t += $timezone; my $ht = sprintf("%02d", (localtime($t))[2]); $normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{average} = $normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{duration} / $normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{count}; $details .= ""; $zday = ""; foreach my $m (sort keys %{$normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{min}}) { my $rd = &average_per_minutes($m, $histo_avg_minutes); $hourly_count{"$ht:$rd"} += $normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{min}{$m}; $hourly_duration{"$ht:$rd"} += ($normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{min_duration}{$m} || 0); } if ($#histo_avgs > 0) { foreach my $rd (@histo_avgs) { next if (!exists $hourly_count{"$ht:$rd"}); $details .= ""; } } } } # Set graph dataset my %graph_data = (); foreach my $h ("00" .. "23") { foreach my $rd (@histo_avgs) { $graph_data{count} .= "['$h:$rd'," . ($hourly_count{"$h:$rd"} || 0) . "],"; $graph_data{duration} .= "['$h:$rd'," . (int($hourly_duration{"$h:$rd"} / ($hourly_count{"$h:$rd"} || 1)) || 0) . "],"; } } $graph_data{count} =~ s/,$//; $graph_data{duration} =~ s/,$//; %hourly_count = (); %hourly_duration = (); my $query_histo = ''; if ($graph) { $query_histo = &jqplot_histograph($graphid++, 'normalized_slowest_queries_details_'.$rank, $graph_data{count}, $graph_data{duration}, 'Queries', 'Avg. duration'); } my $users_involved = ''; if (scalar keys %{$normalyzed_info{$curdb}{$k}{users}} > 0) { $users_involved = qq{}; } my $apps_involved = ''; if (scalar keys %{$normalyzed_info{$curdb}{$k}{apps}} > 0) { $apps_involved = qq{}; } print $fh qq{ }; $rank++; } if (!$found) { print $fh qq{}; } print $fh qq{
Rank Min duration Max duration Avg duration Times executed Total duration Query
$zday$ht" . &comma_numbers($normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{count}) . "" . &convert_time($normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{duration}) . "" . &convert_time($normalyzed_info{$curdb}{$k}{chronos}{$d}{$h}{average}) . "
$zday$ht:$rd" . &comma_numbers($hourly_count{"$ht:$rd"}) . "" . &convert_time($hourly_duration{"$ht:$rd"}) . "" . &convert_time($hourly_duration{"$ht:$rd"}/($hourly_count{"$ht:$rd"}||1)) . "
$rank $min $max $avg $count

Details

$duration
$query
$md5

Times Reported Time consuming queries #$rank

$query_histo $details
Day Hour Count Duration Avg duration

$users_involved

}; if (scalar keys %{$normalyzed_info{$curdb}{$k}{users}} > 0) { print $fh qq{
}; my $idx = 1; foreach my $u (sort {$normalyzed_info{$curdb}{$k}{users}{$b}{duration} <=> $normalyzed_info{$curdb}{$k}{users}{$a}{duration}} keys %{$normalyzed_info{$curdb}{$k}{users}}) { if ($normalyzed_info{$curdb}{$k}{users}{$u}{duration} > 0) { my $details = "[ User: $u"; $details .= " - Total duration: ".&convert_time($normalyzed_info{$curdb}{$k}{users}{$u}{duration}); $details .= " - Times executed: $normalyzed_info{$curdb}{$k}{users}{$u}{count}"; $details .= " ]\n"; print $fh qq{
$details
}; $idx++; } } print $fh qq{

}; } if (scalar keys %{$normalyzed_info{$curdb}{$k}{apps}} > 0) { print $fh qq{
}; my $idx = 1; foreach my $u (sort {$normalyzed_info{$curdb}{$k}{apps}{$b}{duration} <=> $normalyzed_info{$curdb}{$k}{apps}{$a}{duration}} keys %{$normalyzed_info{$curdb}{$k}{apps}}) { if ($normalyzed_info{$curdb}{$k}{apps}{$u}{duration} > 0) { my $details = "[ Application: $u"; $details .= " - Total duration: ".&convert_time($normalyzed_info{$curdb}{$k}{apps}{$u}{duration}); $details .= " - Times executed: $normalyzed_info{$curdb}{$k}{apps}{$u}{count}"; $details .= " ]\n"; print $fh qq{
$details
}; $idx++; } } print $fh qq{

}; } print $fh qq{
}; my $idx = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$curdb}{$k}{samples}}) { last if ($idx > $sample); my $details = "Date: $normalyzed_info{$curdb}{$k}{samples}{$d}{date}\n"; $details .= "Duration: " . &convert_time($d) . "\n"; $details .= "Database: $normalyzed_info{$curdb}{$k}{samples}{$d}{db}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{db}); $details .= "User: $normalyzed_info{$curdb}{$k}{samples}{$d}{user}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{user}); $details .= "Remote: $normalyzed_info{$curdb}{$k}{samples}{$d}{remote}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{remote}); $details .= "Application: $normalyzed_info{$curdb}{$k}{samples}{$d}{app}\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{app}); $details .= "Bind query: yes\n" if ($normalyzed_info{$curdb}{$k}{samples}{$d}{bind}); my $explain = ''; if ($normalyzed_info{$curdb}{$k}{samples}{$d}{plan}) { $explain = &display_plan("query-g-explain-$rank-$idx", $normalyzed_info{$curdb}{$k}{samples}{$d}{plan}); } $query = &highlight_code(&anonymize_query($normalyzed_info{$curdb}{$k}{samples}{$d}{query})); my $md5 = ''; $md5 = 'md5: ' . md5_hex($normalyzed_info{$curdb}{$k}{samples}{$d}{query}) if ($enable_checksum); print $fh qq{
$query
$md5
$details
$explain
}; $idx++; } print $fh qq{

$NODATA
}; } sub print_prepare_consuming { my $curdb = shift; print $fh qq{

Time consuming prepare

}; my $rank = 1; my $found = 0; foreach my $k (sort {$prepare_info{$curdb}{$b}{duration} <=> $prepare_info{$curdb}{$a}{duration}} keys %{$prepare_info{$curdb}}) { next if (!$prepare_info{$curdb}{$k}{count} || !exists $prepare_info{$curdb}{$k}{duration}); last if ($rank > $top); $found++; $prepare_info{$curdb}{$k}{average} = $prepare_info{$curdb}{$k}{duration} / $prepare_info{$curdb}{$k}{count}; my $duration = &convert_time($prepare_info{$curdb}{$k}{duration}); my $count = &comma_numbers($prepare_info{$curdb}{$k}{count}); my $min = &convert_time($prepare_info{$curdb}{$k}{min}); my $max = &convert_time($prepare_info{$curdb}{$k}{max}); my $avg = &convert_time($prepare_info{$curdb}{$k}{average}); my $query = &highlight_code(&anonymize_query($k)); my $md5 = ''; $md5 = 'md5: ' . md5_hex($k) if ($enable_checksum); my $details = ''; my %hourly_count = (); my %hourly_duration = (); my $days = 0; foreach my $d (sort keys %{$prepare_info{$curdb}{$k}{chronos}}) { $d =~ /^(\d{4})(\d{2})(\d{2})$/; $days++; my $zday = "$abbr_month{$2} $3"; my $dd = $3; my $mo = $2 -1 ; my $y = $1 - 1900; foreach my $h (sort keys %{$prepare_info{$curdb}{$k}{chronos}{$d}}) { my $t = timegm_nocheck(0, 0, $h, $dd, $mo, $y); $t += $timezone; my $ht = sprintf("%02d", (localtime($t))[2]); $prepare_info{$curdb}{$k}{chronos}{$d}{$h}{average} = $prepare_info{$curdb}{$k}{chronos}{$d}{$h}{duration} / ($prepare_info{$curdb}{$k}{chronos}{$d}{$h}{count} || 1); $details .= ""; $zday = ""; foreach my $m (sort keys %{$prepare_info{$curdb}{$k}{chronos}{$d}{$h}{min}}) { my $rd = &average_per_minutes($m, $histo_avg_minutes); $hourly_count{"$ht:$rd"} += $prepare_info{$curdb}{$k}{chronos}{$d}{$h}{min}{$m}; $hourly_duration{"$ht:$rd"} += ($prepare_info{$curdb}{$k}{chronos}{$d}{$h}{min_duration}{$m} || 0); } if ($#histo_avgs > 0) { foreach my $rd (@histo_avgs) { next if (!exists $hourly_count{"$ht:$rd"}); $details .= ""; } } } } # Set graph dataset my %graph_data = (); foreach my $h ("00" .. "23") { foreach my $rd (@histo_avgs) { $graph_data{count} .= "['$h:$rd'," . ($hourly_count{"$h:$rd"} || 0) . "],"; $graph_data{duration} .= "['$h:$rd'," . (int($hourly_duration{"$h:$rd"} / ($hourly_count{"$h:$rd"} || 1)) || 0) . "],"; } } $graph_data{count} =~ s/,$//; $graph_data{duration} =~ s/,$//; %hourly_count = (); %hourly_duration = (); my $users_involved = ''; if (scalar keys %{$prepare_info{$curdb}{$k}{users}} > 0) { $users_involved = qq{}; } my $apps_involved = ''; if (scalar keys %{$prepare_info{$curdb}{$k}{apps}} > 0) { $apps_involved = qq{}; } my $query_histo = ''; if ($graph) { $query_histo = &jqplot_histograph($graphid++, 'time_consuming_prepare_details_'.$rank, $graph_data{count}, $graph_data{duration}, 'Queries', 'Avg. duration'); } print $fh qq{ }; $rank++; } if (!$found) { print $fh qq{}; } print $fh qq{
Rank Total duration Times executed Min duration Max duration Avg duration Query
$zday$ht" . &comma_numbers($prepare_info{$curdb}{$k}{chronos}{$d}{$h}{count}) . "" . &convert_time($prepare_info{$curdb}{$k}{chronos}{$d}{$h}{duration}) . "" . &convert_time($prepare_info{$curdb}{$k}{chronos}{$d}{$h}{average}) . "
$zday$ht:$rd" . &comma_numbers($hourly_count{"$ht:$rd"}) . "" . &convert_time($hourly_duration{"$ht:$rd"}) . "" . &convert_time($hourly_duration{"$ht:$rd"}/($hourly_count{"$ht:$rd"}||1)) . "
$rank $duration $count

Details

$min $max $avg
$query
$md5

Times Reported Time consuming prepare #$rank

$query_histo $details
Day Hour Count Duration Avg duration

$users_involved $apps_involved

}; if (scalar keys %{$prepare_info{$curdb}{$k}{users}} > 0) { print $fh qq{
}; my $idx = 1; foreach my $u (sort {$prepare_info{$curdb}{$k}{users}{$b}{duration} <=> $prepare_info{$curdb}{$k}{users}{$a}{duration}} keys %{$prepare_info{$curdb}{$k}{users}}) { if ($prepare_info{$curdb}{$k}{users}{$u}{duration} > 0) { my $details = "[ User: $u"; $details .= " - Total duration: ".&convert_time($prepare_info{$curdb}{$k}{users}{$u}{duration}); $details .= " - Times executed: $prepare_info{$curdb}{$k}{users}{$u}{count}"; $details .= " ]\n"; print $fh qq{
$details
}; $idx++; } } print $fh qq{

}; } if (scalar keys %{$prepare_info{$curdb}{$k}{apps}} > 0) { print $fh qq{
}; my $idx = 1; foreach my $u (sort {$prepare_info{$curdb}{$k}{apps}{$b}{duration} <=> $prepare_info{$curdb}{$k}{apps}{$a}{duration}} keys %{$prepare_info{$curdb}{$k}{apps}}) { if ($prepare_info{$curdb}{$k}{apps}{$u}{duration} > 0) { my $details = "[ Application: $u"; $details .= " - Total duration: ".&convert_time($prepare_info{$curdb}{$k}{apps}{$u}{duration}); $details .= " - Times executed: $prepare_info{$curdb}{$k}{apps}{$u}{count}"; $details .= " ]\n"; print $fh qq{
$details
}; $idx++; } } print $fh qq{

}; } print $fh qq{
}; my $idx = 1; foreach my $d (sort {$b <=> $a} keys %{$prepare_info{$curdb}{$k}{samples}}) { last if ($idx > $sample); my $details = "Date: $prepare_info{$curdb}{$k}{samples}{$d}{date}\n"; $details .= "Duration: " . &convert_time($d) . "\n"; $details .= "Database: $prepare_info{$curdb}{$k}{samples}{$d}{db}\n" if ($prepare_info{$curdb}{$k}{samples}{$d}{db}); $details .= "User: $prepare_info{$curdb}{$k}{samples}{$d}{user}\n" if ($prepare_info{$curdb}{$k}{samples}{$d}{user}); $details .= "Remote: $prepare_info{$curdb}{$k}{samples}{$d}{remote}\n" if ($prepare_info{$curdb}{$k}{samples}{$d}{remote}); $details .= "Application: $prepare_info{$curdb}{$k}{samples}{$d}{app}\n" if ($prepare_info{$curdb}{$k}{samples}{$d}{app}); $details .= "parameters: ". &anonymize_query($prepare_info{$curdb}{$k}{samples}{$d}{params}) . "\n" if ($prepare_info{$curdb}{$k}{samples}{$d}{params}); $query = &highlight_code(&anonymize_query($prepare_info{$curdb}{$k}{samples}{$d}{query})); my $md5 = ''; $md5 = 'md5: ' . md5_hex($prepare_info{$curdb}{$k}{samples}{$d}{query}) if ($enable_checksum); print $fh qq{
$query
$md5
$details
}; $idx++; } print $fh qq{

$NODATA
}; } sub print_bind_consuming { my $curdb = shift; print $fh qq{

Time consuming bind

}; my $rank = 1; my $found = 0; foreach my $k (sort {$bind_info{$curdb}{$b}{duration} <=> $bind_info{$curdb}{$a}{duration}} keys %{$bind_info{$curdb}}) { next if (!$bind_info{$curdb}{$k}{count} || !exists $bind_info{$curdb}{$k}{duration}); last if ($rank > $top); $found++; $bind_info{$curdb}{$k}{average} = $bind_info{$curdb}{$k}{duration} / $bind_info{$curdb}{$k}{count}; my $duration = &convert_time($bind_info{$curdb}{$k}{duration}); my $count = &comma_numbers($bind_info{$curdb}{$k}{count}); my $min = &convert_time($bind_info{$curdb}{$k}{min}); my $max = &convert_time($bind_info{$curdb}{$k}{max}); my $avg = &convert_time($bind_info{$curdb}{$k}{average}); my $query = &highlight_code(&anonymize_query($k)); my $md5 = ''; $md5 = 'md5: ' . md5_hex($k) if ($enable_checksum); my $details = ''; my %hourly_count = (); my %hourly_duration = (); my $days = 0; foreach my $d (sort keys %{$bind_info{$curdb}{$k}{chronos}}) { $d =~ /^(\d{4})(\d{2})(\d{2})$/; $days++; my $zday = "$abbr_month{$2} $3"; my $dd = $3; my $mo = $2 -1 ; my $y = $1 - 1900; foreach my $h (sort keys %{$bind_info{$curdb}{$k}{chronos}{$d}}) { my $t = timegm_nocheck(0, 0, $h, $dd, $mo, $y); $t += $timezone; my $ht = sprintf("%02d", (localtime($t))[2]); $bind_info{$curdb}{$k}{chronos}{$d}{$h}{average} = $bind_info{$curdb}{$k}{chronos}{$d}{$h}{duration} / ($bind_info{$curdb}{$k}{chronos}{$d}{$h}{count} || 1); $details .= ""; $zday = ""; foreach my $m (sort keys %{$bind_info{$curdb}{$k}{chronos}{$d}{$h}{min}}) { my $rd = &average_per_minutes($m, $histo_avg_minutes); $hourly_count{"$ht:$rd"} += $bind_info{$curdb}{$k}{chronos}{$d}{$h}{min}{$m}; $hourly_duration{"$ht:$rd"} += ($bind_info{$curdb}{$k}{chronos}{$d}{$h}{min_duration}{$m} || 0); } if ($#histo_avgs > 0) { foreach my $rd (@histo_avgs) { next if (!exists $hourly_count{"$ht:$rd"}); $details .= ""; } } } } # Set graph dataset my %graph_data = (); foreach my $h ("00" .. "23") { foreach my $rd (@histo_avgs) { $graph_data{count} .= "['$h:$rd'," . ($hourly_count{"$h:$rd"} || 0) . "],"; $graph_data{duration} .= "['$h:$rd'," . (int($hourly_duration{"$h:$rd"} / ($hourly_count{"$h:$rd"} || 1)) || 0) . "],"; } } $graph_data{count} =~ s/,$//; $graph_data{duration} =~ s/,$//; %hourly_count = (); %hourly_duration = (); my $users_involved = ''; if (scalar keys %{$bind_info{$curdb}{$k}{users}} > 0) { $users_involved = qq{}; } my $apps_involved = ''; if (scalar keys %{$bind_info{$curdb}{$k}{apps}} > 0) { $apps_involved = qq{}; } my $query_histo = ''; if ($graph) { $query_histo = &jqplot_histograph($graphid++, 'time_consuming_bind_details_'.$rank, $graph_data{count}, $graph_data{duration}, 'Queries', 'Avg. duration'); } print $fh qq{ }; $rank++; } if (!$found) { print $fh qq{}; } print $fh qq{
Rank Total duration Times executed Min duration Max duration Avg duration Query
$zday$ht" . &comma_numbers($bind_info{$curdb}{$k}{chronos}{$d}{$h}{count}) . "" . &convert_time($bind_info{$curdb}{$k}{chronos}{$d}{$h}{duration}) . "" . &convert_time($bind_info{$curdb}{$k}{chronos}{$d}{$h}{average}) . "
$zday$ht:$rd" . &comma_numbers($hourly_count{"$ht:$rd"}) . "" . &convert_time($hourly_duration{"$ht:$rd"}) . "" . &convert_time($hourly_duration{"$ht:$rd"}/($hourly_count{"$ht:$rd"}||1)) . "
$rank $duration $count

Details

$min $max $avg
$query
$md5

Times Reported Time consuming bind #$rank

$query_histo $details
Day Hour Count Duration Avg duration

$users_involved $apps_involved

}; if (scalar keys %{$bind_info{$curdb}{$k}{users}} > 0) { print $fh qq{
}; my $idx = 1; foreach my $u (sort {$bind_info{$curdb}{$k}{users}{$b}{duration} <=> $bind_info{$curdb}{$k}{users}{$a}{duration}} keys %{$bind_info{$curdb}{$k}{users}}) { if ($bind_info{$curdb}{$k}{users}{$u}{duration} > 0) { my $details = "[ User: $u"; $details .= " - Total duration: ".&convert_time($bind_info{$curdb}{$k}{users}{$u}{duration}); $details .= " - Times executed: $bind_info{$curdb}{$k}{users}{$u}{count}"; $details .= " ]\n"; print $fh qq{
$details
}; $idx++; } } print $fh qq{

}; } if (scalar keys %{$bind_info{$curdb}{$k}{apps}} > 0) { print $fh qq{
}; my $idx = 1; foreach my $u (sort {$bind_info{$curdb}{$k}{apps}{$b}{duration} <=> $bind_info{$curdb}{$k}{apps}{$a}{duration}} keys %{$bind_info{$curdb}{$k}{apps}}) { if ($bind_info{$curdb}{$k}{apps}{$u}{duration} > 0) { my $details = "[ Application: $u"; $details .= " - Total duration: ".&convert_time($bind_info{$curdb}{$k}{apps}{$u}{duration}); $details .= " - Times executed: $bind_info{$curdb}{$k}{apps}{$u}{count}"; $details .= " ]\n"; print $fh qq{
$details
}; $idx++; } } print $fh qq{

}; } print $fh qq{
}; my $idx = 1; foreach my $d (sort {$b <=> $a} keys %{$bind_info{$curdb}{$k}{samples}}) { last if ($idx > $sample); my $details = "Date: $bind_info{$curdb}{$k}{samples}{$d}{date}\n"; $details .= "Duration: " . &convert_time($d) . "\n"; $details .= "Database: $bind_info{$curdb}{$k}{samples}{$d}{db}\n" if ($bind_info{$curdb}{$k}{samples}{$d}{db}); $details .= "User: $bind_info{$curdb}{$k}{samples}{$d}{user}\n" if ($bind_info{$curdb}{$k}{samples}{$d}{user}); $details .= "Remote: $bind_info{$curdb}{$k}{samples}{$d}{remote}\n" if ($bind_info{$curdb}{$k}{samples}{$d}{remote}); $details .= "Application: $bind_info{$curdb}{$k}{samples}{$d}{app}\n" if ($bind_info{$curdb}{$k}{samples}{$d}{app}); $details .= "parameters: " . &anonymize_query($bind_info{$curdb}{$k}{samples}{$d}{params}) . "\n" if ($bind_info{$curdb}{$k}{samples}{$d}{params}); $query = &highlight_code(&anonymize_query($bind_info{$curdb}{$k}{samples}{$d}{query})); my $md5 = ''; $md5 = 'md5: ' . md5_hex($bind_info{$curdb}{$k}{samples}{$d}{query}) if ($enable_checksum); print $fh qq{
$query
$md5
$details
}; $idx++; } print $fh qq{

$NODATA
}; } sub dump_as_html { my $uri = shift; my $curdb = shift; # Dump the html header &html_header($uri, $curdb); # Set graphs limits if ($overall_stat{$curdb}{'first_log_ts'} =~ /^(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/) { my ($t_y, $t_mo, $t_d, $t_h, $t_mi, $t_s); if (!$log_timezone) { ($t_y, $t_mo, $t_d, $t_h, $t_mi, $t_s) = ($1, $2, $3, $4, $5, $6); } else { ($t_y, $t_mo, $t_d, $t_h, $t_mi, $t_s) = change_timezone($1, $2, $3, $4, $5, $6); } $t_min = timegm_nocheck(0, $t_mi, $t_h, $t_d, $t_mo - 1, $t_y) * 1000; $t_min += ($timezone*1000); $t_min -= ($avg_minutes * 60000); } if ($overall_stat{$curdb}{'last_log_ts'} =~ /^(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/) { my ($t_y, $t_mo, $t_d, $t_h, $t_mi, $t_s); if (!$log_timezone) { ($t_y, $t_mo, $t_d, $t_h, $t_mi, $t_s) = ($1, $2, $3, $4, $5, $6); } else { ($t_y, $t_mo, $t_d, $t_h, $t_mi, $t_s) = change_timezone($1, $2, $3, $4, $5, $6); } $t_max = timegm_nocheck(59, $t_mi, $t_h, $t_d, $t_mo - 1, $t_y) * 1000; $t_max += ($timezone*1000); $t_max += ($avg_minutes * 60000); } if (!$error_only) { if (!$pgbouncer_only) { # Overall statistics print $fh qq{
  • }; &print_overall_statistics($curdb); } if (!$disable_hourly && !$pgbouncer_only) { # Build graphs based on hourly stat &compute_query_graphs($curdb); # Show global SQL traffic &print_sql_traffic($curdb); # Show hourly statistics &print_general_activity($curdb); } if (!$disable_connection && !$pgbouncer_only) { print $fh qq{