pgbadger-9.2/.gitignore:

# Swap files
*.swp

pgbadger-9.2/.perltidyrc:

--backup-and-modify-in-place
--backup-file-extension=beforeTidy
--block-brace-tightness=2
--brace-tightness=2
--closing-token-indentation=1
--continuation-indentation=4
--indent-columns=4
--maximum-line-length=134
--cuddled-else
--opening-sub-brace-on-new-line
--noopening-brace-on-new-line
--nooutdent-labels
--paren-tightness=2
--square-bracket-tightness=2
--vertical-tightness=0
--vertical-tightness-closing=0
--break-at-old-comma-breakpoints
--entab-leading-whitespace=4
--tabs

pgbadger-9.2/CONTRIBUTING.md:

# How to contribute #

## Before submitting an issue ##

1. Upgrade to the latest version of pgBadger and see if the problem remains
2. Look at the [closed issues](https://github.com/dalibo/pgbadger/issues?state=closed), we may have already answered a similar problem
3. [Read the doc](http://dalibo.github.com/pgbadger/). It is short and useful.

pgbadger-9.2/ChangeLog:

2017-07-27 - v9.2

This release of pgBadger is a maintenance release that adds some new features.

  * Add report of checkpoint distance and estimate.
  * Add support of AWS Redshift keywords to the SQL code beautifier.
  * Add autodetection of log format in remote mode to allow remote parsing of a pgbouncer log file together with a PostgreSQL log file.

There are also some bug fixes and feature enhancements.

  - Fix histogram reports that did not show data above the last range.
  - Fix parsing of journalctl output without the log line number pattern ([%l-n]). Thanks to Christian Schmitt for the report.
  - Add report of checkpoint distance and estimate. Thanks to jjsantam for the feature request.
  - Append more information on what is done by the script that updates CSS and javascript files, tools/updt_embedded_rsc.pl.
  - Do not warn when all log files are empty, and exit with code 0.
  - Fix build_log_line_prefix_regex() that did not include %n as a lookup in %regex_map. Thanks to ghosthound for the patch.
  - Change error level of "FATAL: cannot use CSV" to WARNING. Thanks to kong1man for the report.
  - Fix use of uninitialized value warning. Thanks to Payal for the report.
  - Add "permission denied" to error normalization.
  - Update pgbadger to latest commit 5bdc018 of pgFormatter.
  - Add support for AWS Redshift keywords. Thanks to cavanaug for the feature request.
  - Fix missing query in temporary file report when the query was canceled. Thanks to Fabrizio de Royes Mello for the report.
  - Normalize queries with bound parameters, which are replaced with a ?.
  - Add a sanity check to avoid an end time earlier than the start time. Thanks to Christophe Courtois for the patch.
  - Fix a lot of mistyped words and make some grammatical fixes. Use 'pgBadger' where it refers to the program and not the binary file. Also, use "official" expressions such as PgBouncer, GitHub, and CSS. The POD file was synced with the README. Thanks to Euler Taveira for the patch.
  - Fix broken menu when --disable-type is used: the top_cancelled_info test and the closing list must be inside the disable_type test. While at it, indent the disable_lock test. Thanks to Euler Taveira for the patch.
  - Fix use of uninitialized value. Thanks to johnkrugger for the report.
  - Remove the test that reads the log file during log format auto-detection when the file is hosted remotely. Thanks to clomdd for the report.
  - Add autodetection of log format in remote mode to allow remote parsing of a pgbouncer log file together with a PostgreSQL log file.
  - Fix number of sessions wrongly increased after log line validation. Thanks to Achilleas Mantzios for the report.
  - Minor reformatting of the pgBadger description.
  - Fix repeated info in documentation. Thanks to cscatolini for the patch.
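For illustration, the remote-mode format autodetection described above makes it possible to parse a PostgreSQL log and a pgbouncer log hosted on the same remote server in a single run; the ssh user, host name and paths below are only placeholders:

    pgbadger -o report.html \
        ssh://postgres@dbhost/var/log/postgresql/postgresql-9.6-main.log \
        ssh://postgres@dbhost/var/log/pgbouncer/pgbouncer.log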
2017-01-24 - v9.1

This release of pgBadger is a maintenance release that adds some new features.

  * Add report of error class distribution when SQLState is available in the log_line_prefix (see %e placeholder).
  * Update SQL Beautifier to pgFormatter v1.6 code.
  * Improve error message normalization.
  * Add --normalized-only option to generate a text file containing all normalized queries found in a log, with their counts.
  * Allow %c (session id) to replace %p (pid) as unique session id.
  * Add "waiting for lock" messages to event reports.
  * Add --start-monday option to start calendar weeks on Monday instead of the default, Sunday.

There are also some bug fixes and feature enhancements.

  - Add report of error class distribution when SQLState is available in the log line prefix. Thanks to jacks33 for the feature request.
  - Fix incremental global index on resize. Thanks to clomdd for the report.
  - Fix command tag log_line_prefix placeholder %i to allow the space character.
  - Fix --exclude-line option and removal of obsolete directories when retention is enabled and --noreport is used.
  - Fix typo in "vacuum activity table". Thanks to Nicolas Gollet for the patch.
  - Fix autovacuum report. Thanks to Nicolas Gollet for the patch.
  - Fix author of pgBadger's logo (Damien Cazeils) and English in comments. Thanks to Thibaut Madelaine for the patch.
  - Add information about the pgbouncer log format in the -f option. Thanks to clomdd for the report.
  - Add --normalized-only information in documentation.
  - Fix broken report of date-time introduced in a previous patch.
  - Fix duration/query association when log_duration=on and log_statement=all. Thanks to Eric Jensen for the report.
  - Fix normalization of messages about advisory locks. Thanks to Thibaut Madelaine for the report.
  - Fix report of auto_explain output. Thanks to fch77700 for the report.
  - Fix unwanted log format auto-detection with log entries from stdin. Thanks to Jesus Adolfo Parra for the report.
  - Add left open parenthesis to the "stop" characters of the regex used to look for the db client in the prefix, to handle the PostgreSQL client string format that includes the source port. Thanks to Jon Nelson for the patch.
  - Fix some spelling errors. Thanks to Jon Nelson for the patch.
  - Allow %c (session id) to replace %p (pid) as unique session id. Thanks to Jerryliuk for the report.
  - Allow pgBadger to parse the default log_line_prefix that will probably be used in 10.0: '%m [%p] '.
  - Fix missing first line with interpreter call.
  - Fix missing Avg values in CSV report. Thanks to Yosuke Tomita for the report.
  - Fix error message in autodetect_format() method.
  - Add --start-monday option to start calendar weeks on Monday instead of the default, Sunday. Thanks to Joosep Mae for the feature request.
  - Fix --histo-average option. Thanks to Yves Martin for the report.
  - Remove plural form of --ssh-option in documentation. Thanks to mark-a-s for the report.
  - Fix --exclude-time filter and rewrite code that skips unwanted lines as well as code that updates the progress bar. Thanks to Michael Chesterton for the report.
  - Fix support of %r placeholder in prefix instead of %h.
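A short usage sketch for two of the options introduced in this release; the log path is a placeholder and routing the normalized output through -o is an assumption:

    # Dump all normalized queries found in the log, with their counts, to a text file
    pgbadger --normalized-only -o normalized.txt /var/log/postgresql/postgresql-9.6-main.log

    # Build incremental reports whose calendar weeks start on Monday
    pgbadger --start-monday -I -O /var/www/pgbadger/ /var/log/postgresql/postgresql-9.6-main.log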
2016-09-02 - v9.0

This major release of pgBadger is a port to bootstrap 3 and a version upgrade of all resource files (CSS and Javascript). There are also some bug fixes and feature enhancements. Backward compatibility with old incremental reports might be preserved.

  - Sources and licences of resource files are now in a dedicated subdirectory. A script to update their minified versions embedded in the pgbadger script has been added. Thanks to Christoph Berg for the help and feature request.
  - Try to detect user/database/host from connection strings if log_connections is enabled and log_line_prefix doesn't include them. Extend the regexes to autodetect database name, user name, client ip address and application name. The regexes are now the following:

        db      => qr/(?:db|database)=([^,]*)/;
        user    => qr/(?:user|usr)=([^,]*)/;
        client  => qr/(?:client|remote|ip|host)=([^,]*)/;
        appname => qr/(?:app|application)=([^,]*)/;

  - Add backward compatibility with older versions of pgbadger in incremental mode by creating a subdirectory for the new CSS and Javascript files. This subdirectory is named with the major version number of pgbadger.
  - Increase the size of the pgbadger logo that appeared too small with the new font size.
  - Normalize detailed information in all reports.
  - Fix duplicate copy icon in locks report.
  - Fix missing chart on histogram of session times. Thanks to Guillaume Lelarge for the report.
  - Add LICENSE file noting the licenses used by the resource files. Thanks to Christoph Berg for the patch.
  - Add patch to the jqplot library to fix an infinite loop when trying to download some charts. Thanks to Julien Tachoires for the help to solve this issue.
  - Script tools/updt_embedded_rsc.pl will apply the patch to resource file resources/jquery.jqplot.js and doesn't complain if it has already been applied.
  - Remove single trailing comma at end of pie chart datasets. Thanks to Julien Tachoires for the report.
  - Change display of normalized errors.
  - Remove unused or auto-generated files.
  - Update all resource files (js+css) and create a directory that includes the sources of the javascript libraries used in pgbadger. There is also a new script, tools/updt_embedded_rsc.pl, that can be used to generate the minified versions of those files and embed them into pgbadger. This script will also embed the FontAwesome.otf open truetype font into the fontawesome.css file.

2016-08-27 - v8.3

This is a maintenance release that fixes some minor bugs. This release also adds replication command message statistics to the Events reports.

  - Fix auto-detection of stderr format with timestamp as epoch (%n).
  - Fix histogram over multiple days to be cumulative per hour, not an average of the number of events per day.
  - Fix parsing of a remote file that was failing when the file does not exist locally. Thanks to clomdd for the report.
  - Detect timezones like GMT+3 on CSV logs. Thanks to jacksonfoz for the patch.
  - Add replication command message statistics to the Events reports. Thanks to Michael Paquier for the feature request.

This is the last minor version of the 8.x series; the next major version will include an upgrade of the bootstrap and jquery libraries, which needs some major rewrite.

2016-08-11 version 8.2

This is a maintenance release that fixes some minor bugs.
There is also some performances improvement up to 20% on huge files and some new interesting features: * Multiprocessing can be used with pgbouncer log files. * pgBouncer and PostgreSQL log files can be used together in incremental mode. * With default or same prefix, stderr and syslog file can be parsed together, csvlog format can always be used. * Use a modal dialog window to download graphs as png images. * Add pl/pgSQL function information to queries when available. Here are the complete list of changes: - Fix report of database system messages. - Fix multi line statement concatenation after an error. - Fix box size for report of queries generating the most temporary files and the most waiting queries. - Rewrite code to better handle multi-line queries. - Fix garbage in examples of event queries with error only mode (option -w). Thanks to Thomas Reiss for the report. - Fix getting dataset related to query duration with the use of auto_explain. Thanks to tom__b for the patch. - Use a modal dialog window to download graphs as png images. - Huge rewrite of the incremental mechanism applied to log files to handle PostgreSQL and pgbouncer logs at the same time. - Multiprocess can be used with pgbouncer log. - Add code to remove remaining keyword placeholders tags. - Fix an other possible case of truncated date in LAST_PARSED file Thanks to brafaeloliveira for the report. - Set default scale 1 in pretty_print_number() js function. - Fix auto-detection of pgbouncer files that contain only stats lines. Thanks to Glyn Astill for the patch. - Add date to samples of queries generating most temporary files. - Do not display warning message of empty log when quiet mode is enable. - Fix reading from stdin by disabling pgbouncer format detection. Thanks to Robert Vargason for the patch. - Fix case of duplicate normalized error message with "nonstandard use of ...". - Fix storage of current temporary file related request. - Use the mnemonic rather than a signal number in kill calls. Thanks to Komeda Shinji for the patch. 2016-04-21 version 8.1 This is a maintenance release that fix a major issue introduced with support to pgbouncer that prevent parsing of compressed PostgreSQL log files and adds some improvements. Here are the complete list of changes: - Fix one case where pid file remain after dying. - Add requirement of log_error_verbosity = default to documentation. - Report message "LOG: using stale statistics instead of current ones because stats collector is not responding" in events view. - Remove obsolete days when we are in binary mode with --noreport - Fix wrong report of statements responsible of temporary files. Thanks to Luan Nicolini Marcondes for the report. This patch also exclude line with log level LOCATION to be parsed. - Fix limit on number of sample at report generation and remove pending LAST_PARSED.tmp file. - Update load_stat() function and global variables to support pgbouncer statistics. Update version to 2.0. - Handle more kind or query types. Thanks to julien Rouhaud for the patch. - Fix pgbouncer log parser to handle message: FATAL: the database system is shutting down - Fix whitespace placed in between the E and the quote character. Thanks to clijunky for the report. - Fix a major issue introduced with support to pgbouncer that prevent parsing of compressed PostgreSQL log files. Thanks to Levente Birta for the report. 2016-02-22 version 8.0 This is a major release that adds support to pgbouncer log files. 
New pgbouncer reports are: * Request Throughput * Bytes I/O Throughput * Queries Average duration * Simultaneous sessions * Histogram of sessions times * Sessions per database * Sessions per user * Sessions per host * Established connections * Connections per database * Connections per user * Connections per host * Most used reserved pools * Most Frequent Errors/Events pgbouncer log files can be parsed together with PostgreSQL logs. It also adds a two new command line options: * --pgbouncer-only to only show pgbouncer related reports. * --rebuild to be able to rebuild all html reports in incremental output directory where binary data files are still available. This release fixes a major bug introduced with journalctl code that was prevented the use of multiprocess feature. Here the complete list of other changes: - Fix progress bar with pgbouncer (only events are increased). - Sort %SYMBOLE hashtable for remove "!=" / "=" bug. Thanks to Nicolas Gollet for the patch. - Fix incorrect numbers on positional parameters in report Queries generating most temporary files. Thanks to Oskar Wiksten for the report. - Update operators list in SQL code beautifier with last update in pgFormatter. Thanks to Laurenz Albe for the report and the list of missing operators. - Cosmetic change to code and add some more debug information. 2016-01-18 version 7.3 This is a maintenance release to fix a major bug that was breaking the incremental mode in pgBadger. It also adds some more reports and features. * Add --timezone=+/-HH to control the timezone used in charts. The javascript library runs at client side so the timezone used is the browser timezone so the displayed time in the charts can be different from the time in the log file. * Add /tmp/pgbadger.pid file to prevent cron jobs overlaping on same log files. * Add command line option --pid-dir to be able to run two pgbadger at the same time by setting an alternate path to the pid file. * Report information about "LOG: skipping analyze of ..." into events reports. * Report message "LOG: sending cancel to blocking autovacuum" into events reports. Useful to look for queries generating autovacuum kill on account of a lock issue. Here the complete list of changes: - Automatically remove obsolete pid file when there is no other pgbadger process running (unix only) - Update documentation about the --timezone command line option. - Add --timezone=+/-HH to control the timezone used in charts. Thanks to CZAirwolf for the report. - Fix Histogram of session times when there is no data. - Fix unclosed test file. - Fix an other case where pgbadger.pid was not removed. - Always display slides part on connections report even if there is no data. - Fix some label on sessions reports - Add remove of pid file at normal ending. - Fix wrong size/offset of log files that was breaking incremental mode. Thanks a lot to CZAirwolf for the report and the help to find the problem. - Add command line option --pid-dir to be able to run two pgbadger at the same time by setting an alternate path to the directory where the pid file will be written. - Add /tmp/pgbadger.pid file to prevent cron jobs overlaping on same log files. - Report information about "LOG: skipping analyze of ..." into events reports. - Report message "LOG: sending cancel to blocking autovacuum" into events reports. Usefull to know which queries generate autovacuum kill on account of a lock issue. - Add more debug information about check log parsing decision. 
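A hedged usage sketch for the two command line options added in the 7.3 release above; the timezone offset, paths and pid directory are only examples:

    # Display chart times shifted to UTC+02 instead of the browser timezone
    pgbadger --timezone=+02 /var/log/postgresql/postgresql-9.4-main.log

    # Run a second pgbadger instance concurrently with its own pid file location
    pgbadger --pid-dir /tmp/pgbadger_batch2 /var/log/postgresql/postgresql-9.4-main.log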
2016-01-05 version 7.2 This new release fixes some issues especially in temporary files reports and adds some features. * Allow pgBadger to parse natively the journalctl command output * Add new keywords from PG 9.5 for code formating * Add support to %n log_line_prefix option for Unix epoch (PG 9.6) There's also some new command line option: * Adds --journalctl_cmd option to enable this functionality and set the command. Typically: --journalctl_cmd "journalctl -u postgresql-9.4" to parse output of PG 9.4 log Here is the full list of changes/fixes: - Fix missing detailed information (date, db, etc.) in Queries generating the largest temporary files report. - Fix label of sessions histogram. Thanks to Guillaume Lelarge for the patch. - Fix to handle cancelled query that generate more than one temporary file and more generally aggregate size on queries with multiple (> 1GB) temporary files. - Add "Total size" column in Temporary Files Activity table and fix size increment when a query have multiple 1GB temporary file. - Fix temporary file query normalization and examples. - Fix incomplete and wrong queries associated to temporary files when STATEMENT level log line was missing. Thanks to Mael Rimbault for the report. - When -w or --watch-mode is used, message "canceling statement due to statement timeout" s now reported with other errors. - Allow dot in dbname and user name. Thanks to David Turvey for the patch. - Remove use of unmaintained flotr2 javascript chart library and use of jqflot instead. - Fix bad formatting with anonymized values in queries. - Display 0ms instead of 0s when qery time is under the millisecond. Thanks to venkatabn for the report. - Normalize cursor names. Patch from Julien Rouhaud - Fix unregistered client host name with default pattern. Thanks to Eric S. Lucinger Ruiz for the report. - Remove redundant regular expressions. - Tweaking awkward phrasing, correcting subject-verb agreements, typos, and misspellings. Patch from Josh Kupershmid. - Fix potential incorrect creation of subdirectory in incremental mode. - Allow single white space after duration even if this should not appear. - Update copyright. 2015-07-11 version 7.1 This new release fixes some issues and adds a new report: * Distribution of sessions per application It also adds Json operators to SQL Beautifier. Here is the full list of changes/fixes: - Fix unwanted seek on old parsing position when log entry is stdin. Thanks to Olivier Schiavo for the report. - Try to fix a potential issue in log start/end date parsing. Thanks to gityerhubhere for the report. - Fix broken queries with multiline in bind parameters. Thank to Nicolas Thauvin for the report. - Add new report Sessions per application. Thanks to Keith Fiske for the feature request. - Add Json Operators to SQL Beautifier. Thanks to Tom Burnett and Hubert depesz Lubaczewski. - Makefile.PL: changed manpage section from '1' to '1p', fixes #237. Thanks to Cyril Bouthors for the patch. - Update Copyright date-range and installation instructions that was still refering to version 5. Thanks to Steve Crawford for the report. - Fix typo in changelog Note that new official releases must now be downloaded from GitHub and no more from SourceForge. Download at https://github.com/dalibo/pgbadger/releases 2015-05-08 version 7.0 This major release adds some more useful reports and features. * New report about events distribution per 5 minutes. * New per application details (total duration and times executed) for each query reported in Top Queries reports. 
The details are visible from a new button called "App(s) involved". * Add support to auto_explain extension. EXPLAIN plan will be added together with top slowest queries when available in log file. * Add a link to automatically open the explain plan on http://explain.depesz.com/ * New report on queries cumulated durations per user. * New report about the Number of cancelled queries (graph) * New report about Queries generating the most cancellation (N) * New report about Queries most cancelled. Here is the full list of changes/fixes: - Update documentation with last reports. - Fix number of event samples displayed in event reports. - Add new report about events distribution per x minutes. - Add app=%a default prefix to documentation. - Add reports of "App(s) involved" with top queries. Thanks to Antti Koivisto for the feature request. - Remove newline between a ) and , in the beautifier. - Add link to automatically open the explain plan on http://explain.depesz.com/ - Add support to auto_explain, EXPLAIN plan will be added together with top slowest queries when available in the log file. - Add a graph on distributed duration per user. Thanks to Korriliam for the patch. - Add tree new report: Number of cancelled queries (graph), Queries generating the most cancellation (N) and Queries most cancelled lists. Thanks to Thomas Reiss for the feature request. - Fix case where temporary file statement must be retrieved from the previous LOG statement and not in the following STATEMENT log entry. Thanks to Mael Rimbault for the report. - Add --enable-checksum to show a md5 hash of each reported queries. Thanks to Thomas Reiss for the feature request. 2015-04-13 version 6.4 This new release fixes a major bugs in SQL beautifier which removed operator and adds some useful improvement in anonymization of parameters values. pgBadger will also try to parse the full csvlog when a broken CSV line is encountered. - Make anonymization more useful. Thanks to Hubert depesz Lubaczewski for the patch. - Fix previous patch for csvlog generated with a PostgreSQL version before 9.0. - Try continue CSV parsing after broken CSV line. Thanks to Sergey Burladyan for the patch. - Fix bug in SQL beautifier which removed operator. Thanks to Thomas Reiss for the report. - Fix loop exit, check terminate quickly and correct comments indentation. Thanks to Sergey Burladyan for the patch Please upgrade. 2015-03-27 version 6.3 This new release fixes some bugs and adds some new reports: * A new per user details (total duration and times executed) for each query reported in Top Queries reports. The details are visible from a new button called "User(s) involved". * Add "Average queries per session" and "Average queries duration per session" in Sessions tab of the Global statistics. * Add connection time histogram. * Use bar graph for Histogram of query times and sessions times. There's also some cool new features and options: * Add -L | --logfile-list option to read a list of logfiles from an external file. * Add support to log_timezones with + and - signs for timestamp with milliseconds (%m). * Add --noreport option to instruct pgbadger to not build any HTML reports in incremental mode. pgBadger will only create binary files. * Add auto detection of client=%h or remote=%h from the log so that adding a prefix is not needed when it respect the default of pgbadger. * Redefine sessions duration histogram bound to be more accurate. 
* Add new option -M | --no-multiline to not collect multi-line statement and avoid storing and reporting garbage when needed. * Add --log-duration option to force pgbadger to associate log entries generated by both log_duration=on and log_statement=all. The pgbadger_tools script have also been improve with new features: * Add a new tool to pgbadger_tool to output top queries in CSV format for follow-up analysis. * Add --explain-time-consuming and --explain-normalized options to generate explain statement about top time consuming and top normalized slowest queries. Here is the full list of changes/fixes: - Update flotr2.min.js to latest github code. - Add per user detail information (total duration and times executed) for each query reported in "Time consuming queries", "Most frequent queries" "and Normalized slowest queries". The details are visible from a new button called "User(s) involved" near the "Examples" button. Thanks to Guillaume Le Bihan for the patch and tsn77130 for the feature request. - pgbadger_tool: add tool to output top queries to CSV format, for follow-up analysis. Thanks to briklen for the patch. - Add geometric operators to SQL beautifier. Thanks to Rodolphe Quiedeville for the report. - Fix non closing session when a process crash with message: "terminating connection because of crash of another server process". Thanks to Mael Rimbault for the report. - Add -L|--logfile-list command line option to read a list of logfiles from a file. Thanks to Hubert depesz Lubaczewski for the feature request. - Automatically remove %q from prefix. Thanks to mbecroft for report. - Do not store DEALLOCATE log entries anymore. - Fix queries histogram where range was not appears in the right order. Thanks to Grzegorz Garlewicz for the report. - Fix min yaxis in histogram graph. Thanks to grzeg1 for the patch. - Add --log-duration command line option to force pgbadger to associate log entries generated by both log_duration = on and log_statement=all. Thanks to grzeg1 for the feature request. - Small typographical corrections. Thanks to Jefferson Queiroz Venerando and Bill Mitchell the patches. - Reformat usage output and add explanation of the --noreport command line option. - Fix documentation about minimal pattern in custom log format. Thanks to Julien Rouhaud for the suggestion. - Add support to log_timezones with + and - signs to timestamp with milliseconds (%m). Thanks to jacksonfoz for the patch. pgbadger was not recognize log files with timezones like 'GMT+3'. - Add --noreport command line option to instruct pgbadger to not build any reports in incremental mode. pgBadger will only create binary files. Thanks to hubert Depesz Lubaczewski for the feature request. - Add time consuming information in tables of Queries per database... Thanks to Thomas for the feature request. - Add more details about the CSV parser error. It now prints the line number and the last parameter that generate the failure. This should allow to see the malformed log entry. - Change substitution markup in attempt to fix a new look-behind assertions error. Thanks to Paolo Cavallini for the report. - Use bar graph for Histogram of query times and sessions times. - Fix wrong count of min/max queries per second. Thanks to Guillaume Lelarge for the report. Add COPY statement to SELECT or INSERT statements statistics following the copy direction (stdin or stdout). - Fix Illegal division by zero at line 3832. Thanks to MarcoTrek for the report. 
- Add "Average queries per session" and "Average queries duration per session" in Sessions tab of the Global stat. Thanks to Guillaume Lelarge for the feature request. - Reformat numbers in pie graph tracker. Thanks to jirihlinka for the report. - pgbadger_tools: Add --explain-time-consuming and --explain-normalized to generate explain statement about top time consuming and top normalized slowest queries. Thanks to Josh Kupershmid fot the feature request. - Remove everything than error information from json output when -w | --watch-mode is enable. Thanks to jason. - Fix undefined subroutine encode_json when using -x json. Thanks to jason for the report. - Add auto detection of client=%h or remote=%h from the log so that adding a prefix is not needed when it respect the default of pgbadger. - Redefine sessions duration histogram bound to be more accurate. Thanks to Guillaume Lelarge for the report. - Add connection time histogram. Thanks to Guillaume Lelarge for the feature request. - Add new option -M | --no-multiline to not collect multi-line statement to avoid garbage especially on errors that generate a huge report. - Do not return SUCCESS error code 0 when aborted or something fails. Thanks to Bruno Almeida for the patch. 2014-10-07 version 6.2 This is a maintenance release to fix a regression in SQL traffic graphs and fix some other minor issues. The release also add a new option -D or --dns-resolv to map client ip addresses to FQDN without having log_hostname enabled on the postgresql's configuration - Do not display queries in Slowest individual, Time consuming and Normalized slowest queries reports when there is no duration in log file. Display NO DATASET instead. - Fix min/max queries in SQL traffic that was based on duration instead of query count. - Fix wrong unit to Synced files in Checkpoints files report. Thanks to Levente Birta for the report. - Enable allow_loose_quotes in Text::CSV_XS call to fix CSV parsing error when fields have quote inside an unquoted field. Thanks to Josh Berkus for the report. - Add -D | --dns-resolv command line option to replace ip addresses by their DNS name. Be warned that this can slow down pgBagder a lot. Thanks to Jiri Hlinka for the feature request. 2014-09-25 version 6.1 This release fix some issues and adds some new features. It adds a new option -B or --bar-graph to use bar instead of line in graphs. It will also keep tick formatting when zooming. The release also add a new program: pgbadger_tools to demonstrate how to works with pgBadger binary files to build your own new feature. The first tools 'explain-slowest' allow printing of top slowest queries as EXPLAIN statements. There's also additional options to execute automatically the statements with EXPLAIN ANALYZE and get the execution plan. See help of the program for more information or the README file in the tools directory. Some modifications will change certain behavior: - The -T | --title text value will now be displayed instead of the pgBadger label right after the logo. It was previously displayed on mouse over the pgBadger label. Here is the full list of changes/fixes: - Change -T | --title position on pgBadger report. Title now override the pgBadger label. Thanks to Julien Rouhauld for the patch. - Add --file-per-query and --format-query option to write each slowest query in separate file named qryXXX.sql and perform minimal formating of the queries. Thanks to Rodolphe Quiedeville for the patch. - Remove debug query from explain-slowest tool. 
- Fix surge in sessions number report when an exclusion or inclusion option (dbname, user, appname, etc.) is used. Thanks to suyah for the report. - Fix fatal error when remote log file is 0 size. Thanks to Julien Rouhaud for the report. - Allow pgbadger_tools --explain-slowest to automatically execute the EXPLAIN statements an report the plan. See pgbadger_tools --help for more explanation. - Add --analyze option to replace EXPLAIN statements by EXPLAIN (ANALYZE, VERBOSE, BUFFERS). - Move pgbadger_tools program and README.tools into the tools/ subdirectory with removing the extension. Add more comments and explanations. - Fix case where die with interrupt signal is received when using -e option. Thanks to Lloyd Albin for the report. - Add a new program pgbadger_tools to demonstrate how to deal with pgBadger binary files to build your own new feature. The first one 'explain-slowest' allow printing of top slowest queries as EXPLAIN statements. - Keep tick formatting when zooming. Thanks to Julien Rouhaud for the patch. - Fix automatic detection of rsyslogd logs. Thanks to David Day for the report. - Fix issue in calculating min/max/avg in "General Activity" report. It was build on the sum of queries duration per minutes instead of each duration. Thanks to Jayadevan M for the report. - The same issue remains with percentile that are build using the sum of duration per minutes and doesn't represent the real queries duration. - This commit also include a modification in convert_time() method to reports milliseconds. - Add -B or --bar-graph command line option to use bar instead of line in graph. Thanks to Bart Dopheide for the suggestion. - Fix Checkpoint Wal files usage graph title. 2014-08-08 version 6.0 This new major release adds some new features like automatic cleanup of binary files in incremental mode or maximum number of weeks for reports retention. It improve the incremental mode with allowing the use of multiprocessing with multiple log file. It also adds report of query latency percentile on the general activity table (percentiles are 90, 95, 99). There's also a new output format: JSON. This format is good for sharing data with other languages, which makes it easy to integrate pgBadger's result into other monitoring tools. You may want to expose your reports but not the data, using the --anonymize option pgBadger will be able to anonymize all literal values in the queries. Sometime select to copy a query from the report could be a pain. There's now a click-to-select button in front of each query that allow you to just use Ctrl+C to copy it on clipboard The use of the new -X option also allow pgBadger to write out extra files to the outdir when creating incremental reports. Those files are the CSS and Javascript code normally repeated in each HTLM files. Warning: the behavior of pgBadger in incremental mode has changed. It will now always cleanup the output directory of all the obsolete binary file. If you were using those files to build your own reports, you can prevent pgBadger to remove them by using the --noclean option. Note that if you use the retention feature, all those files in obsolete directories will be removed too. Here is the complete list of changes. - Javascript improvement to use only one call of sql_select and sql_format. Use jQuery selector instead of getElementById to avoid js errors when not found. Thanks to Julien Rouhaud for the patches. 
- Add -R | --retention command line option to set the maximum number of week reports to preserve in the output directory for incremental mode. Thanks to Kong Man for the feature request. - Session count is immediately decreased when a FATAL error is received in the current session to prevent overcount of simultaneous session number. Thanks to Josh Berkus for the report. - Fix issue in incremental mode when parsing is stopped after rotating log and rotated log has new lines. The new file was not parsed at all. Thanks to CZAirwolf for the report. - Fix revert to single thread when last_line_parsed exists. Thanks to Bruno Almeida for the report. - Fix issue in handling SIGTERM/SIGINT that cause pgbadger to continue. - Add autoclean feature to pgbadger in incremental mode. pgbadger will now removed automatically obsolete binary files unless you specify --noclean at command line. - Add new command line option --anonymize to obscure all literals in queries/errors to hide confidential data. Thanks to wmorancfi for the feature request. - Fix single "SELECT;" as a query in a report. Thanks to Marc Cousin for the report. - Add a copy icon in front of each query in the report to select the entire query. Thanks to Josh Berkus for the feature request. - Fix wrong move to beginning of a file if the file was modified after have been parsed a time. Thanks to Herve Werner for the report. - Allow pgBadger to write out extra files to outdir when creating incremental reports. Require the use of the -X or --extra-files option in incremental mode. Thanks to Matthew Musgrove for the feature request. - Fix incomplete handling of XZ compressed format. - Fix move to offset in incremental mode with multiprocess and incomplete condition when file is smaller than the last offset. Thanks to Herve Werner for the report. - Allow/improve incremental mode with multiple log file and multiprocess. - Fix incorrect location of temporary file storing last parsed line in multiprocess+incremental mode. Thanks to Herve Werner for the report. - Fix remote ssh command error sh: 2: Syntax error: "|" unexpected. Thanks to Herve Werner for the report. - Fix missing database name in samples of top queries reports. Thanks to Thomas Reiss for the report. - Add minimal documentation about JSON output format. - Add execute attribute to pgbadger in the source repository, some may find this more helpful when pgbadger is not installed and executed directly from this repository. - Fix issue with csv log format and incremental mode. Thanks to Suya for the report and the help to solve the issue. There also a fix to support autovacuum statistic with csv format. - Fix bad URL to documentation. Thanks to Rodolphe Quiedeville for the report. - Two minor change to made easier to use Tsung scenario: Remove the first empty line and replace probability by weight. Now it is possible to use the scenario as is with Tsung 1.5. - Fix incremental mode where weeks on index page start on sunday and week reports start on monday. Thanks to flopma and birkosan for the report. - Replace label "More CPU costly" by "Highest CPU-cost". Thanks to Marc Cousin for the suggestion. - Add query latency percentile to General Activity table (percentiles are 90, 95, 99). Thanks to Himanchali for the patch. - Fix typon pgbadger call. Thanks to Guilhem Rambal for the report. - Add JSON support for output format. JSON format is good for sharing data with other languages, which makes it easy to integrate pgBadger's result into other monitoring tools like Cacti or Graphite. 
Thanks to Shanzhang Lan for the patch. - Update documentation about remote mode feature. - Update documentation to inform that the xz utility should be at least in version 5.05 to support the --robot command line option. Thanks to Xavier Millies-Lacroix for the report. - Fix remote logfile parsing. Thanks to Herve Werner for the report. 2014-05-05 version 5.1-1 - Fix parsing of remote log file, forgot to apply some patches. Thank to Herve Werner for the report. 2014-05-04 version 5.1 This new release fixes several issues and adds several new features like: * Support to named PREPARE and EXECUTE queries. They are replaced by the real prepare statement and reported into top queries. * Add new --exclude-line command line option for excluding immediately log entries matching any regex. * Included remote and client information into the most frequent events. * pgBadger is now able to parse remote logfiles using a password less ssh connection and generate locally the reports. * Histogram granularity can be adjusted using the -A command line option. * Add new detail information on top queries to show when the query is a bind query. * Support to logfile compressed using the xz compression format. * Change week/day menu in incremental index, it is now represented as usual with a calendar view per month. * Fix various compatibility issue with Windows and Perl 5.8 Here is the full list of changes: - fixed calendar display and correct typo. Thanks to brunomgalmeida for the patch. - revert to single thread if file is small. Thanks to brunomgalmeida for the patch. - print calendars 4+4+4 instead of 3+4+4+1 when looking at full year. Thanks to brunomgalmeida for the patch. - Add --exclude-line option for excluding log entries with a regex based on the full log line. Thanks to ferfebles for the feature request. - Fix SQL keywords that was beautified twice. - Remove duplicate pg_keyword in SQL beautifier. - Fix increment of session when --disable-session is activated. - Fix missing unit in Checkpoints Activity report when time value is empty. Thanks to Herve Werner for the report. - Fix double information in histogram data when period is the hour. - Add support to named PREPARE and EXECUTE queries. Calls to EXECUTE statements are now replaced by the prepared query and show samples with parameters. Thanks to Brian DeRocher for the feature request. - Included Remote and Client information into the most frequent events examples. Thanks to brunomgalmeida for the patch. - Fix documentation about various awkward phrasings, grammar, and spelling. Consistently capitalize "pgBadger" as such, except for command examples which should stay all-lowercase. Thanks to Josh Kupershmidt for the patch. - Fix incremental mode on Windows by replacing %F and %u POSIX::strftime format to %Y-%m-%d and %w. Thanks to dthiery for the report. - Remove Examples button when there is no examples available. - Fix label on tips in histogram of errors reports. - Fix error details in incremental mode in Most Frequent Errors/Events report. Thanks to Herve Werner for the report. - Fix Sync time value in Checkpoints buffers report. Thanks to Herve Werner for the report. - Fix wrong connections per host count. Thanks to Herve Werner for the report. - Allow pgBadger to parse remote log file using a password less ssh connection. Thanks to Orange OLPS department for the feature request. - Histogram granularity can be adjusted using the -A command line option. By default they will report the mean of each top queries or errors occurring per hour. 
You can now specify the granularity down to the minute. Thanks to Orange OLPS department for the feature request. - Add new detail information on top queries to show when the query is a bind query. Thanks to Orange OLPS department for the feature request. - Fix queries that exceed the size of the container. - Add unit (seconds) to checkpoint write/sync time in the checkpoints activity report. Thanks to Orange OLPS department for the report. - Fix missing -J option in usage. - Fix incomplete lines in split logfile to rewind to the beginning of the line. Thanks to brunomgalmeida for the patch. - Fix tsung output and add tsung xml header sample to output file. - Make it possible to do --sample 0 (prior it was falling back to the default of 3). Thanks to William Moran for the patch. - Fix xz command to be script readable and always have size in bytes: xz --robot -l %f | grep totals | awk "{print $5}" - Add support to logfile compressed by the xz compression format. Thanks to Adrien Nayrat for the patch. - Do not increment queries duration histogram when prepare|parse|bind log are found, but only with execute log. Thanks to Josh Berkus for the report. - Fix normalization of error message about unique violation when creating intermediate dirs. Thanks to Tim Sampson for the report. - Allow use of Perl metacharacters like [..] in application name. Thanks to Magnus Persson for the report. - Fix dataset tip to be displayed above image control button. Thanks to Ronan Dunklau for the fix. - Renamed the Reset bouton to "To Chart" to avoid confusion with unzoom feature. - Fix writing of empty incremental last parsed file. - Fix several other graphs - Fix additional message at end of query or error when it was logged from application output. Thanks to Herve Werner for the report. - Fix checkpoint and vacuum graphs when all dataset does not have all values. Thanks to Herve Werner for the report. - Fix week numbered -1 in calendar view. - Change week/day menu in incremental index, it is now represented as usual with a calendar view per month. Thanks to Thom Brown for the feature request. - Load FileHandle to fix error: Can not locate object method "seek" via package "IO::Handle" with perl 5.8. Thanks to hkrizek for the report. - Fix count of queries in progress bar when there is compressed file and multiprocess is enabled. Thanks to Johnny Tan for the report. - Fix debug message "Start parsing at offset" - Add ordering in queries times histogram. Thanks to Ulf Renman for the report. - Fix various typos. Thanks to Thom Brown for the patch. - Fix Makefile error, "WriteMakefile: Need even number of args at Makefile.PL" with Perl 5.8. Thanks to Fangr Zhang for the report. - Fix some typo in Changelog 2014-02-05 version 5.0 This new major release adds some new features like incremental mode and SQL queries times histogram. There is also a hourly graphic representation of the count and average duration of top normalized queries. Same for errors or events, you will be able to see graphically at which hours they are occurring the most often. The incremental mode is an old request issued at PgCon Ottawa 2012 that concern the ability to construct incremental reports with successive runs of pgBadger. It is now possible to run pgbadger each days or even more, each hours, and have cumulative reports per day and per week. A top index page allow you to go directly to the weekly and daily reports. 
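A brief sketch of the remote parsing and histogram granularity features described in this 5.1 entry; the ssh user, host and paths are placeholders:

    # Parse a log file on a remote host over a password-less ssh connection
    pgbadger -o remote-report.html ssh://postgres@dbserver1/var/log/postgresql/postgresql-9.3-main.log

    # Compute top query/error histograms with a 10 minute granularity instead of the hourly default
    pgbadger -A 10 /var/log/postgresql/postgresql-9.3-main.log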
This mode have been build with simplicity in mind so running pgbadger by cron as follow: 0 23 * * * pgbadger -q -I -O /var/www/pgbadger/ /var/log/postgresql.log is enough to have daily and weekly reports viewable using your browser. You can take a look at a sample report at http://dalibo.github.io/pgbadger/demov5/index.html There's also a useful improvement to allow pgBadger to seek directly to the last position in the same log file after a successive execution. This feature is only available using the incremental mode or the -l option and parsing a single log file. Let's say you have a weekly rotated log file and want to run pgBadger each days. With 2GB of log per day, pgbadger was spending 5 minutes per block of 2 GB to reach the last position in the log, so at the end of the week this feature will save you 35 minutes. Now pgBadger will start parsing new log entries immediately. This feature is compatible with the multiprocess mode using -j option (n processes for one log file). Histogram of query times is a new report in top queries slide that shows the query times distribution during the analyzed period. For example: Range Count Percentage -------------------------------------------- 0-1ms 10,367,313 53.52% 1-5ms 799,883 4.13% 5-10ms 451,646 2.33% 10-25ms 2,965,883 15.31% 25-50ms 4,510,258 23.28% 50-100ms 180,975 0.93% 100-500ms 87,613 0.45% 500-1000ms 5,856 0.03% 1000-10000ms 2,697 0.01% > 10000ms 74 0.00% There is also some graphic and report improvements, like the mouse tracker formatting that have been reviewed. It now shows a vertical crosshair and all dataset values at a time when mouse pointer moves over series. Automatic queries formatting has also been changed, it is now done on double click event as simple click was painful when you want to copy some part of the queries. The report "Simultaneous Connections" has been relabeled into "Established Connections", it is less confusing as many people think that this is the number of simultaneous sessions, which is not the case. It only count the number of connections established at same time. Autovacuum reports now associate database name to the autovacuum and autoanalyze entries. Statistics now refer to "dbname.schema.table", previous versions was only showing the pair "schema.table". This release also adds Session peak information and a report about Simultaneous sessions. Parameters log_connections and log_disconnections must be enabled in postgresql.conf. Complete ChangeLog: - Fix size of SQL queries columns to prevent exceeding screen width. - Add new histogram reports on top normalized queries and top errors or event. It shows at what hours and in which quantity the queries or errors appears. - Add seeking to last parser position in log file in incremental mode. This prevent parsing all the file to find the last line parse from previous run. This only works when parsing a single flat file, -j option is permitted. Thanks to ioguix for the kick. - Rewrite reloading of last log time from binary files. - Fix missing statistics of last parsed queries in incremental mode. - Fix bug in incremental mode that prevent reindexing a previous day. Thanks to Martin Prochazka for the great help. - Fix missing label "Avg duration" on column header in details of Most frequent queries (N). - Add vertical crosshair on graph. - Fix case where queries and events was not updated when using -b and -e command line. Thanks to Nicolas Thauvin for the report. - Fix week sorting on incremental report main index page. 
Thanks to Martin Prochazka for the report. - Add "Histogram of query times" report to show statistics like 0-100ms : 80%, 100-500ms :14%, 500-1000ms : 3%, > 1000ms : 1%. Thanks to tmihail for the feature request. - Format mouse tracker on graphs to show all dataset value at a time. - Add control of -o vs -O option with incremental mode to prevent wrong use. - Change log level of missing LAST_PARSED.tmp file to WARNING and add a HINT. - Update copyright date to 2014 - Fix empty reports of connections. Thanks to Reeshna Ramakrishnan for the report. - Fix display of connections peak when no connection was reported. - Fix warning on META_MERGE for ExtUtils::MakeMaker < 6.46. Thanks to Julien Rouhaud for the patch. - Add documentation about automatic incremental mode. - Add incremental mode to pgBadger. This mode will build a report per day and a cumulative report per week. It also create an index interface to easiest access to the different report. Must be run, for example, as: pgbadger /var/log/postgresql.log.1 -I -O /var/www/pgbadger/ after a daily PostgreSQL log file rotation. - Add -O | --outdir path to specify the directory where out file must be saved. - Automatic queries formatting is now done on double click event, simple click was painful when you want to copy some part of the queries. Thanks to Guillaume Smet for the feature request. - Remove calls of binmode to force html file output to be utf8 as there is some bad side effect. Thanks to akorotkov for the report. - Remove use of Time::HiRes Perl module as some distributions does not include this module by default in core Perl install. - Fix "Wide character in print" Perl message by setting binmode to :utf8. Thanks to Casey Allen Shobe for the report. - Fix application name search regex to handle application name with space like "pgAdmin III - Query Tool". - Fix wrong timestamps saved with top queries. Thanks to Herve Werner for the report. - Fix missing logs types statitics when using binary mode. Thanks to Herve Werner for the report. - Fix Queries by application table column header: Database replaced by Application. Thanks to Herve Werner for the report. - Add "Max number of times the same event was reported" report in Global stats Events tab. - Replace "Number of errors" by "Number of ERROR entries" and add "Number of FATAL entries". - Replace "Number of errors" by "Number of events" and "Total errors found" by "Total events found" in Events reports. Thanks to Herve Werner for the report. - Fix title error in Sessions per database. - Fix clicking on the info link to not go back to the top of the page. Thanks to Guillaume Smet for the report and solution. - Fix incremental report from binary output where binary data was not loaded if no queries were present in log file. Thanks to Herve Werner for the report. - Fix parsing issue when log_error_verbosity = verbose. Thanks to vorko for the report. - Add Session peak information and a report about Simultaneous sessions. log_connections+log_disconnections must be enabled in postgresql.conf. - Fix wrong requests number in Queries by user and by host. Thanks to Jehan-Guillaume de Rorthais for the report. - Fix issue with rsyslog format failing to parse logs. Thanks to Tim Sampson for the report. - Associate autovacuum and autoanalyze log entry to the corresponding database name. Thanks to Herve Werner for the feature request. 
- Change "Simultaneous Connections" label into "Established Connections", it is less confusing as many people think that this is the number of simultaneous sessions, which is not the case. It only count the number of connections established at same time. Thanks to Ronan Dunklau for the report. 2013-11-08 version 4.1 This release fixes two major bugs and some others minor issues. There's also a new command line option --exclude-appname that allow exclusion from the report of queries generated by a specific program, like pg_dump. Documentation have been updated with a new chapter about building incremental reports. - Add log_autovacuum_min_duration into documentation in chapter about postgresql configuration directives. Thanks to Herve Werner for the report. - Add chapter about "Incremental reports" into documentation. - Fix reports with per minutes average where last time fraction was not reported. Thanks to Ludovic Levesque and Vincent Laborie for the report. - Fix unterminated comment in information popup. Thanks to Ronan Dunklau for the patch. - Add --exclude-appname command line option to eliminate unwanted traffic generated by a specific application. Thanks to Steve Crawford for the feature request. - Allow external links use into URL to go to a specific report. Thanks to Hubert depesz Lubaczewski for the feature request. - Fix empty reports when parsing compressed files with the -j option which is not allowed with compressed file. Thanks to Vincent Laborie for the report. - Prevent progress bar length to increase after 100% when real size is greater than estimated size (issue found with huge compressed file). - Correct some spelling and grammar in ChangeLog and pgbadger. Thanks to Thom Brown for the patch. - Fix major bug on SQL traffic reports with wrong min value and bad average value on select reports, add min/max for select queries. Thanks to Vincent Laborie for the report. 2013-10-31 - Version 4.0 This major release is the "Say goodbye to the fouine" release. With a full rewrite of the reports design, pgBadger has now turned the HTML reports into a more intuitive user experience and professional look. The report is now driven by a dynamic menu with the help of the embedded Bootstrap library. Every main menu corresponds to a hidden slide that is revealed when the menu or one of its submenus is activated. There's also the embedded font Font Awesome webfont to beautify the report. Every statistics report now includes a key value section that immediately shows you some of the relevant information. Pie charts have also been separated from their data tables using two tabs, one for the chart and the other one for the data. Tables reporting hourly statistics have been moved to a multiple tabs report following the data. This is used with General (queries, connections, sessions), Checkpoints (buffer, files, warnings), Temporary files and Vacuums activities. There's some new useful information shown in the key value sections. Peak information shows the number and datetime of the highest activity. Here is the list of those reports: - Queries peak - Read queries peak - Write queries peak - Connections peak - Checkpoints peak - WAL files usage Peak - Checkpoints warnings peak - Temporary file size peak - Temporary file number peak Reports about Checkpoints and Restartpoints have been merged into a single report. These are almost one in the same event, except that restartpoints occur on a slave cluster, so there was no need to distinguish between the two. 
Recent PostgreSQL versions add additional information about checkpoints, the number of synced files, the longest sync and the average of sync time per file. pgBadger collects and shows this information in the Checkpoint Activity report. There's also some new reports: - Prepared queries ratio (execute vs prepare) - Prepared over normal queries - Queries (select, insert, update, delete) per user/host/application - Pie charts for tables with the most tuples and pages removed during vacuum. The vacuum report will now highlight the costly tables during a vacuum or analyze of a database. The errors are now highlighted by a different color following the level. A LOG level will be green, HINT will be yellow, WARNING orange, ERROR red and FATAL dark red. Some changes in the binary format are not backward compatible and the option --client has been removed as it has been superseded by --dbclient for a long time now. If you are running a pg_dump or some batch process with very slow queries, your report analysis will be hindered by those queries having unwanted prominence in the report. Before this release it was a pain to exclude those queries from the report. Now you can use the --exclude-time command line option to exclude all traces matching the given time regexp from the report. For example, let's say you have a pg_dump at 13:00 each day during half an hour, you can use pgbadger as follows: pgbadger --exclude-time "2013-09-.* 13:.*" postgresql.log If you are also running a pg_dump at night, let's say 22:00, you can write it as follows: pgbadger --exclude-time '2013-09-\d+ 13:[0-3]' --exclude-time '2013-09-\d+ 22:[0-3]' postgresql.log or more shortly: pgbadger --exclude-time '2013-09-\d+ (13|22):[0-3]' postgresql.log Exclude time always requires the iso notation yyyy-mm-dd hh:mm:ss, even if log format is syslog. This is the same for all time-related options. Use this option with care as it has a high cost on the parser performance. 2013-09-17 - version 3.6 Still an other version in 3.x branch to fix two major bugs in vacuum and checkpoint graphs. Some other minors bugs has also been fixed. - Fix grammar in --quiet usage. Thanks to stephen-a-ingram for the report. - Fix reporting period to starts after the last --last-parsed value instead of the first log line. Thanks to Keith Fiske for the report. - Add --csv-separator command line usage to documentation. - Fix CSV log parser and add --csv-separator command line option to allow change of the default csv field separator, coma, in any other character. - Avoid "negative look behind not implemented" errors on perl 5.16/5.18. Thanks to Marco Baringer for the patch. - Support timestamps for begin/end with fractional seconds (so it'll handle postgresql's normal string representation of timestamps). - When using negative look behind set sub-regexp to -i (not case insensitive) to avoid issues where some upper case letter sequence, like SS or ST. - Change shebang from /usr/bin/perl to /usr/bin/env perl so that user-local (perlbrew) perls will get used. - Fix empty graph of autovacuum and autoanalyze. - Fix checkpoint graphs that was not displayed any more. 2013-07-11 - Version 3.5 Last release of the 3.x branch, this is a bug fix release that also adds some pretty print of Y axis number on graphs and a new graph that groups queries duration series that was shown as second Y axis on graphs, as well as a new graph with number of temporary file that was also used as second Y axis. 
- Split temporary files report into two graphs (files size and number of file) to no more used a second Y axis with flotr2 - mouse tracker is not working as expected. - Duration series representing the second Y axis in queries graph have been removed and are now drawn in a new "Average queries duration" independant graph. - Add pretty print of numbers in Y axis and mouse tracker output with PB, TB, GB, KB, B units, and seconds, microseconds. Number without unit are shown with P, T, M, K suffix for easiest very long number reading. - Remove Query type reports when log only contains duration. - Fix display of checkpoint hourly report with no entry. - Fix count in Query type report. - Fix minimal statistics output when nothing was load from log file. Thanks to Herve Werner for the report. - Fix several bug in log line parser. Thanks to Den Untevskiy for the report. - Fix bug in last parsed storage when log files was not provided in the right order. Thanks to Herve Werner for the report. - Fix orphan lines wrongly associated to previous queries instead of temporary file and lock logged statement. Thanks to Den Untevskiy for the report. - Fix number of different samples shown in events report. - Escape HTML tags on error messages examples. Thanks to Mael Rimbault for the report. - Remove some temporary debug informations used with some LOG messages reported as events. - Fix several issues with restartpoint and temporary files reports. Thanks to Guillaume Lelarge for the report. - Fix issue when an absolute path was given to the incremental file. Thanks to Herve Werner for the report. - Remove creation of incremental temp file $tmp_last_parsed when not running in multiprocess mode. Thanks to Herve Werner for the report. 2013-06-18 - Version 3.4 This release adds lot of graphic improvements and a better rendering with logs over few hours. There's also some bug fixes especially on report of queries that generate the most temporary files. - Update flotr2.min.js to latest github code. - Add mouse tracking over y2axis. - Add label/legend information to ticks displayed on mouseover graphs. - Fix documentation about log_statement and log_min_duration_statement. Thanks to Herve Werner for the report. - Fix missing top queries for locks and temporary files in multiprocess mode. - Cleanup code to remove storage of unused information about connection. - Divide the huge dump_as_html() method with one method per each report. - Checkpoints, restart points and temporary files are now drawn using a period of 5 minutes per default instead of one hour. Thanks to Josh Berkus for the feature request. - Change fixed increment of one hour to five minutes on queries graphs "SELECT queries" and "Write queries". Remove graph "All queries" as, with a five minutes increment, it duplicates the "Queries per second". Thanks to Josh Berkus for the feature request. - Fix typos. Thanks to Arsen Stasic for the patch. - Add default HTML charset to utf-8 and a command line option --charset to be able to change the default. Thanks to thomas hankeuhh for the feature request. - Fix missing temporary files query reports in some conditions. Thanks to Guillaume Lelarge and Thomas Reiss for the report. - Fix some parsing issue with log generated by pg 7.4. - Update documentation about missing new reports introduced in previous version 3.3. Note that it should be the last release of the 3.x branch unless there's major bug fixes, but next one will be a major release with a completely new design. 
2013-05-01 - Version 3.3 This release adds four more useful reports about queries that generate locks and temporary files. Another new report about restartpoints on slaves, and several bug fixes or cosmetic changes. Support for parallel processing under Windows OS has been removed. - Remove parallel processing under the Windows platform, the use of waitpid is freezing pgbadger. Thanks to Saurabh Agrawal for the report. I'm not comfortable with that OS, this is why support has been removed; if someone knows how to fix that, please submit a patch. - Fix Error in tempfile() under Windows. Thanks to Saurabh Agrawal for the report. - Fix wrong queries storage with lock and temporary file reports. Thanks to Thomas Reiss for the report. - Add sample queries to "Most frequent waiting queries" and "Queries generating the most temporary files" report. - Add two more reports about locks: "Most frequent waiting queries (N)" and "Queries that waited the most". Thanks to Thomas Reiss for the patch. - Add two reports about temporary files: "Queries generating the most temporary files (N)" and "Queries generating the largest temporary files". Thanks to Thomas Reiss for the patch. - Cosmetic change to the Min/Max/Avg duration columns. - Fix report of samples error with csvlog format. Thanks to tpoindessous for the report. - Add --disable-autovacuum to the documentation. Thanks to tpoindessous for the report. - Fix unmatched ) in regex when using %s in prefix. - Fix bad average size of temporary file in Overall statistics report. Thanks to Jehan Guillaume de Rorthais for the report. - Add restartpoint reporting. Thanks to Guillaume Lelarge for the patch. - Made some minor changes in CSS. - Replace %% in log line prefix internally by a single % so that it could be exactly the same as in log_line_prefix. Thanks to Cal Heldenbrand for the report. - Fix perl documentation header, thanks to Cyril Bouthors for the patch. 2013-04-07 - Version 3.2 This is mostly a bug fix release, it also adds escaping of HTML code inside queries and adds Min/Max reports with Average duration in all queries reports. - In multiprocess mode, fix case where pgbadger does not update the last-parsed file and does not take care of the previous run. Thanks to Kong Man for the report. - Fix case where pgbadger does not update the last-parsed file. Thanks to Kong Man for the report. - Add CDATA to make validator happy. Thanks to Euler Taveira de Oliveira for the patch. - Some code review by Euler Taveira de Oliveira, thanks for the patch. - Fix case where stats were multiplied by N when -J was set to N. Thanks to thegnorf for the report. - Add a line in documentation about log_statement that disables log_min_duration_statement when it is set to all. - Add quick note on how to contribute, thanks to Damien Clochard for the patch. - Fix issue with logs read from stdin. Thanks to hubert depesz lubaczewski for the report. - Force pgbadger to not try to beautify queries bigger than 10kb, this will take too much time. This value can be reduced in the future if hangs with long queries still happen. Thanks to John Rouillard for the report. - Fix another issue in replacing bind param when the bind value is alone on a single line. Thanks to Kjeld Peters for the report. - Fix parsing of compressed files together with uncompressed files using the -j option. Uncompressed files are now processed using the split method and compressed ones are parsed by one dedicated process each. - Replace zcat by gunzip -c to fix an issue on Mac OS X.
Thanks to Kjeld Peters for the report. - Escape HTML code inside queries. Thanks to denstark for the report. - Add Min/Max in addition to Average duration values in queries reports. Thanks to John Rouillard fot the feature request. - Fix top slowest array size with binary format. - Fix an other case with bind parameters with value in next line and the top N slowest queries that was repeated until N even if the real number of queries was lower. Thanks to Kjeld Peters for the reports. - Fix non replacement of bind parameters where there is line breaks in the parameters, aka multiline bind parameters. Thanks to Kjeld Peters for the report. - Fix error with seekable export tag with Perl v5.8. Thanks to Jeff Bohmer for the report. - Fix parsing of non standard syslog lines begining with a timestamp like "2013-02-28T10:35:11-05:00". Thanks to Ryan P. Kelly for the report. - Fix issue #65 where using -c | --dbclient with csvlog was broken. Thanks to Jaime Casanova for the report. - Fix empty report in watchlog mode (-w option). 2013-02-21 - Version 3.1 This is a quick release to fix missing reports of most frequent errors and slowest normalized queries in previous version published yesterday. - Fix empty report in watchlog mode (-w option). - Force immediat die on command line options error. - Fix missing report of most frequent events/errors report. Thanks to Vincent Laborie for the report. - Fix missing report of slowest normalized queries. Thanks to Vincent Laborie for the report. - Fix display of last print of progress bar when quiet mode is enabled. 2013-02-20 - Version 3.0 This new major release adds parallel log processing by using as many cores as wanted to parse log files, the performances gain is directly related to the number of cores specified. There's also new reports about autovacuum/autoanalyze informations and many bugs have been fixed. - Update documentation about log_duration, log_min_duration_statement and log_statement. - Rewrite dirty code around log timestamp comparison to find timestamp of the specified begin or ending date. - Remove distinction between logs with duration enabled from variables log_min_duration_statement and log_duration. Commands line options --enable-log_duration and --enable-log_min_duration have been removed. - Update documentation about parallel processing. - Remove usage of Storable::file_magic to autodetect binary format file, it is not include in core perl 5.8. Thanks to Marc Cousin for the report. - Force multiprocess per file when files are compressed. Thanks to Julien Rouhaud for the report. - Add progress bar logger for multiprocess by forking a dedicated process and using pipe. Also fix some bugs in using binary format that duplicate query/error samples per process. - chmod 755 pgbadger - Fix checkpoint reports when there is no checkpoint warnings. - Fix non report of hourly connections/checkpoint/autovacuum when not query is found in log file. Thanks to Guillaume Lelarge for the report. - Add better handling of signals in multiprocess mode. - Add -J|--job_per_file command line option to force pgbadger to use one process per file instead of using all to parse one file. Useful to have better performances with lot of small log file. - Fix parsing of orphan lines with stderr logs and log_line_prefix without session information into the prefix (%l). - Update documentation about -j | --jobs option. - Allow pgbadger to use several cores, aka multiprocessing. Add options -j | --jobs option to specify the number of core to use. 
- Add autovacuum and autoanalyze infos to binary format. - Fix case in SQL code highlighting where QQCODE temp keyword was not replaced. Thanks to Julien Ruhaud for the report. - Fix CSS to draw autovacuum graph and change legend opacity. - Add pie graph to show repartition of number of autovacuum per table and number of tuples removed by autovacuum per table. - Add debug information about selected type of log duration format. - Add report of tuples/pages removed in report of Vacuums by table. - Fix major bug on syslog parser where years part of the date was wrongly extracted from current date with logs generated in 2012. - Fix issue with Perl 5.16 that do not allow "ss" inside look-behind assertions. Thanks to Cedric for the report. - New vacuum and analyze hourly reports and graphs. Thanks to Guillaume Lelarge for the patch. UPGRADE: if you are running pgbadger by cron take care if you were using one of the following option: --enable-log_min_duration and --enable-log_duration, they have been removed and pgbadger will refuse to start. 2013-01-17 - Version 2.3 This release fixes several major issues especially with csvlog and a memory leak with log parsing using a start date. There's also several improvement like new reports of number of queries by database and application. Mouse over reported queries will show database, user, remote client and application name where they are executed. A new binary input/output format have been introduced to allow saving or reading precomputed statistics. This will allow incremental reports based on periodical runs of pgbader. This is a work in progress fully available with next coming major release. Several SQL code beautifier improvement from pgFormatter have also been merged. - Clarify misleading statement about log_duration: log_duration may be turned on depending on desired information. Only log_statement must not be on. Thanks to Matt Romaine for the patch. - Fix --dbname and --dbuser not working with csvlog format. Thanks to Luke Cyca for the report. - Fix issue in SQL formatting that prevent left back indentation when major keywords were found. Thanks to Kevin Brannen for the report. - Display 3 decimals in time report so that ms can be seen. Thanks to Adam Schroder for the request. - Force the parser to not insert a new line after the SET keyword when the query begin with it. This is to preserve the single line with queries like SET client_encoding TO "utf8"; - Add better SQL formatting of update queries by adding a new line after the SET keyword. Thanks to pilat66 for the report. - Update copyright and documentation. - Queries without application name are now stored under others application name. - Add report of number of queries by application if %a is specified in the log_line_prefix. - Add link menu to the request per database and limit the display of this information when there is more than one database. - Add report of requests per database. - Add report of user,remote client and application name to all request info. - Fix memory leak with option -b (--begin) and in incremental log parsing mode. - Remove duration part from log format auto-detection. Thanks to Guillaume Lelarge for the report. - Fix a performance issue on prettifying SQL queries that makes pgBagder several time slower that usual to generate the HTML output. Thanks to Vincent Laborie for the report. - Add missing SQL::Beautify paternity. - Add 'binary' format as input/output format. 
The binary output format allows to save log statistics in a non human readable file instead of an HTML or text file. These binary files might then be used as regular input files, combined or not, to produce a html or txt report. Thanks to Jehan Guillaume de Rorthais for the patch. - Remove port from the session regex pattern to match all lines. - Fix the progress bar. It was trying to use gunzip to get real file size for all formats (by default). Unbreak the bz2 format (that does not report real size) and add support for zip format. Thanks to Euler Taveira de Oliveira fort the patch. - Fix some typos and grammatical issues. Thanks to Euler Taveira de Oliveira fort the patch. - Improve SQL code highlighting and keywords detection merging change from pgFormatter project. - Add support to hostname or ip address in the client detection. Thanks to stuntmunkee for the report. - pgbadger will now only reports execute statement of the extended protocol (parse/bind/execute). Thanks to pierrestroh for the report. - Fix numerous typos as well as formatting and grammatical issues. Thanks to Thom Brown for the patch. - Add backward compatibility to obsolete --client command line option. If you were using the short option -c nothing is changed. - Fix issue with --dbclient and %h in log_line_prefix. Thanks to Julien Rouhaud for the patch. - Fix multiline progress bar output. - Allow usage of a dash into database, user and application names when prefix is used. Thanks to Vipul for the report. - Mouse over queries will now show in which database they are executed in the overviews (Slowest queries, Most frequent queries, etc. ). Thank to Dirk-Jan Bulsink for the feature request. - Fix missing keys on %cur_info hash. Thanks to Marc Cousin for the report. - Move opening file handle to log file into a dedicated function. Thanks to Marc Cousin for the patch. - Replace Ctrl+M by printable \r. Thanks to Marc Cousin for the report. 2012-11-13 - Version 2.2 This release add some major features like tsung output, speed improvement with csvlog, report of shut down events, new command line options to generate report excluding some user(s), to build report based on select queries only, to specify regex of the queries that must only be included in the report and to remove comments from queries. Lot of bug fixes, please upgrade. - Update PostgreSQL keywords list for 9.2 - Fix number of queries in progress bar with tsung output. - Remove obsolete syslog-ng and temporary syslog-ll log format added to fix some syslog autodetection issues. There is now just one syslog format: syslog, differences between syslog formats are detected and the log parser is adaptive. - Add comment about the check_incremental_position() method - Fix reports with empty graphs when log files were not in chronological order. - Add report of current total of queries and events parsed in progress bar. Thanks to Jehan-Guillaume de Rorthais for the patch. - Force pgBadger to use an require the XS version of Text::CSV instead of the Pure Perl implementation. It is a good bit faster thanks to David Fetter for the patch. Note that using csvlog is still a bit slower than syslog or stderr log format. - Fix several issue with tsung output. - Add report of shut down events - Add debug information on command line used to pipe compressed log file when -v is provide. - Add -U | --exclude-user command line option to generate report excluded user. Thanks to Birta Levente for the feature request. 
- Allow some options to be specified multiple time or be written as a coma separated list of value, here are these options: --dbname, --dbuser, --dbclient, --dbappname, --exclude_user. - Add -S | --select-only option to build report only on select queries. - Add first support to tsung output, see usage. Thanks to Guillaume Lelarge for the feature request. - Add --include-query and --include-file to specify regex of the queries that must only be included in the report. Thanks to Marc Cousin for the feature request. - Fix auto detection of log_duration and log_min_duration_statement format. - Fix parser issue with Windows logs without timezone information. Thanks to Nicolas Thauvin for the report. - Fix bug in %r = remote host and port log line prefix detection. Thanks to Hubert Depesz Lubaczewski for the report. - Add -C | --nocomment option to remove comment like /* ... */ from queries. Thanks to Hubert Depesz Lubaczewski for the feature request. - Fix escaping of log_line_prefix. Thanks to Hubert Depesz Lubaczewski for the patch. - Fix wrong detection of update queries when a query has a object names containing update and set. Thanks to Vincent Laborie for the report. 2012-10-10 - Version 2.1 This release add a major feature by allowing any custom log_line_prefix to be used by pgBadger. With stderr output you at least need to log the timestamp (%t) the pid (%p) and the session/line number (%l). Support to log_duration instead of log_min_duration_statement to allow reports simply based on duration and count report without query detail and report. Lot of bug fixes, please upgrade asap. - Add new --enable-log_min_duration option to force pgbadger to use lines generated by the log_min_duration_statement even if the log_duration format is autodetected. Useful if you use both but do not log all queries. Thanks to Vincent Laborie for the feature request. - Add syslog-ng format to better handle syslog traces with notation like: [ID * local2.info]. It is autodetected but can be forced in the -f option with value set to: syslog-ng. - Add --enable-log_duration command line option to force pgbadger to only use the log_duration trace even if log_min_duration_statement traces are autodetected. - Fix display of empty hourly graph when no data were found. - Remove query type report when log_duration is enabled. - Fix a major bug in query with bind parameter. Thanks to Marc Cousin for the report. - Fix detection of compressed log files and allow automatic detection and uncompress of .gz, .bz2 and .zip files. - Add gunzip -l command to find the real size of a gzip compressed file. - Fix log_duration only reports to not take care about query detail but just count and duration. - Fix issue with compressed csvlog. Thanks to Philip Freeman for the report. - Allow usage of log_duration instead of log_min_duration_statement to just collect statistics about the number of queries and their time. Thanks to Vincent Laborie for the feature request. - Fix issue on syslog format and autodetect with additional info like: [ID * local2.info]. Thanks to kapsalar for the report. - Removed unrecognized log line generated by deadlock_timeout. - Add missing information about unsupported csv log input from stdin. It must be read from a file. Thank to Philip Freeman for the report. - Fix issue #28: Illegal division by zero with log file without query and txt output. Thanks to rlowe for the report. - Update documentation about the -N | --appname option. - Rename --name option into --appname. 
Thanks to Guillaume Lelarge for the patch. - Fix min/max value in x axis that always represented 2 days by default. Thanks to Casey Allen Shobe for the report. - Fix major bug when running pgbadger with the -e option. Thanks to Casey Allen Shobe for the report and the great help. - Change project url to http://dalibo.github.com/pgbadger/. Thanks to Damien Clochard for this new hosting. - Fix a lot of issues in CSV parser and force locale to be C. Thanks to Casey Allen Shobe for the reports. - Improve speed with custom log_line_prefix. - Merge pull request #26 from elementalvoid/helpdoc-fix - Fixed help text for --exclude-file. Old help text indicated that the option name was --exclude_file which was incorrect. - Remove the obsolete --regex-user and --regex-db options that were used to specify a search pattern in the log_line_prefix to find the user and db name. This is replaced by the --prefix option. - Replace Time column report header by Hour. - Fix another issue in log_line_prefix parser with stderr format - Add a more complex example using log_line_prefix - Fix log_line_prefix issue when using timestamp with milliseconds. - Add support to use any custom log_line_prefix with new option -p or --prefix. See README for an example. - Fix false autodetection of CSV format when log_statement is enabled or in possible other cases. This was resulting in error: "FATAL: cannot use CSV". Thanks to Thomas Reiss for the report. - Fix display of empty graph of connections per second - Allow character : in log line prefix, it will no longer break the log parsing. Thanks to John Rouillard for the report. - Add report of configuration parameter changes into the errors report and change the errors report into an events report to handle important messages that are not errors. - Allow pgbadger to recognize " autovacuum launcher" messages. 2012-08-21 - version 2.0 This major version adds some changes not backward compatible with previous versions. Options -p and -g are no longer used as progress bar and graph generation are now enabled by default. The obsolete -l option used to specify the log file to parse has been reused to specify an incremental file. Besides these changes and some bug fixes there are also new features: * Using an incremental file with the -l option allows parsing a single log file multiple times and "seeking" to the last line parsed during the previous run. Useful if you have a log rotation not in sync with your pgbadger run. For example you can run something like this: pgbadger `find /var/log/postgresql/ -name "postgresql*" -mtime -7 -type f` \ -o report_`date +%F`.html -l /var/run/pgbadger/last_run.log * All queries displayed in the HTML report are now clickable to display or hide a nice SQL query format. This is called the SQL format beautifier. * The CSV log parser has been entirely rewritten to handle CSV with multiline. Everyone should upgrade. - Change license from BSD like to PostgreSQL license. Request from Robert Treat. - Fix wrong pointer on Connections per host menu. Reported by Jean-Paul Argudo. - Small fix for sql formatting adding scrollbars. Patch by Julien Rouhaud. - Add SQL format beautifier on SQL queries. When you click on a query it will be beautified. Patch by Gilles Darold. - The progress bar is now enabled by default, the -p option has been removed. Use -q | --quiet to disable it. Patch by Gilles Darold. - Graphs are now generated by default for HTML output, option -g has been removed and option -G added to allow disabling graph generation. Request from Julien Rouhaud, patch by Gilles Darold.
- Remove option -g and -p to the documentation. Patch by Gilles Darold. - Fix case sensitivity in command line options. Patch by Julien Rouhaud. - Add -T|--title option to change report title. Patch by Yury Bushmelev. - Add new option --exclude-file to exclude specific commands with regex stated in a file. This is a rewrite by Gilles Darold of the neoeahit (Vipul) patch. - CSV log parser have been entirely rewritten to handle csv with multi line, it also adds approximative duration for csvlog. Reported by Ludhimila Kendrick, patch by Gilles Darold. - Alphabetical reordering of options list in method usage() and documentation. Patch by Gilles Darold. - Remove obsolete -l | --logfile command line option, the -l option will be reused to specify an incremental file. Patch by Gilles Darold. - Add -l | --last-parsed options to allow incremental run of pgbadger. Patch by Gilles Darold. - Replace call to timelocal_nocheck by timegm_nocheck, to convert date time into second from the epoch. This should fix timezone issue. Patch by Gilles Darold. - Change regex on log parser to allow missing ending space in log_line_prefix. This seems a common mistake. Patch by Gilles Darold. - print warning when an empty log file is found. Patch by Gilles Darold. - Add perltidy rc file to format pgbadger Perl code. Patch from depesz. 2012-07-15 - version 1.2 This version adds some reports and fixes a major issue in log parser. Every one should upgrade. - Rewrite this changelog to be human readable. - Add -v | --verbose to enable debug mode. It is now disable by default - Add hourly report of checkpoint warning when checkpoints are occuring too frequently, it will display the hourly count and the average occuring time. - Add new report that sums the messages by log types. The report shows the number of messages of each log type, and a percentage. It also displays a pie graph. Patch by Guillaume Lelarge. - Add missing pie graph on locks by type report. - Format pie mouse track to display values only. - Fix graph download button id on new connection graph. - Add trackFormatter to flotr2 line graphs to show current x/y values. - Fix issue on per minute minimum value. - Add a note about Windows Os and zcat as well as a more general note about using compressed log file in other format than gzip. - Complete rewrite of the log parser to handle unordered log lines. Data are now stored by pid before and added to the global statistics at end. Error report now include full details, statements, contexts and hints when available. Deadlock are also fully reported with the concerned queries. - Fix miss handling of multi lines queries on syslog. - Add -a|--average option to configure the per minutes average interval for queries and connexions. If you want the average to be calculated each minutes instead of the 5 per default, use --average 1 or for the default --average 5. If you want average per hour set it to 60. - Add hourly statistics of connections and sessions as well as a chart about the number of connection per second (5 minutes average). - Allow OTHERS type of queries lower than 2% to be include in the sum of types < 2%. - Add autodetection of syslog ident name if different than the default "postgres" and that there is just one ident name in the log. - Remove syslog replacement of tabulation by #011 still visible when there was multiple tabulation. - Fix autodetection of log format syslog with single-digit day number in date. - Add ChangeLog to MANIFEST and change URI in html footer. 
- Check pgBadger compatibility with Windows Oses. Run perfectly. 2012-07-04 - version 1.1 This release fixes lot of issues and adds several main features. New feature: - Add possibility to get log from stdin - Change syslog parsing regex to allow log timestamp in log_line_prefix very often forgotten when log destination is changed from stderr to syslog. - Add documentation for the -z | --zcat command line option. - Allow `zcat` location to be specified via `--zcat` - David E. Wheeler - Add --disable-session,--disable-connection and disable-checkpoint command line options to remove their respective reports from the output - Add --disable-query command line option to remove queries statistics from the output - Add --disable-hourly command line option to remove hourly statistics from the output - Add --disable-error command line option to remove error report from the output - Add --exclude-query option to exclude types of queries by specifying a regex - Set thousand separator and decimal separator to be locale dependant - Add -w option to only report errors - Add Makefile.PL and full POD documentation to the project - Allow multiple log files from command line - Add simple csvlog support - Alex Hunsaker - Hourly report for temporary files and checkpoints have moved in a separate table. - Add hourly connections and sessions statistics. - Add a chart about the number of connections per seconds. Bug fix: - Add information about log format requirement (lc_message = 'C'). Reported by Alain Benard. - Fix for begin/end dates with single digit day using syslog. Patch by Joseph Marlin. - Fix handle of syslog dates with single-digit day number. Patch by Denis Orlikhin. - Fix many English syntax in error messages and documentation. Patch by Joseph Marlin. - Fix non terminated TH html tag in checkpoint hourly table. Reported by Joseph Marlin. - "Log file" section will now only report first and last log file parsed - Fix empty output in hourly temporary file stats. - Fix wrapping query that goes out of the table and makes the window scroll horizontally. Asked by Isaac Reuben. - Fix code where != was replaced by $$CLASSSY0A$$!=$$CLASSSY0B$$ in the output. Reported by Isaac Reuben - Fix and review text report output. - Fix an issue in SQL code highligh replacement. - Complete review of the HTML output. - Add .gitignore for swap files. Patch by Vincent Picavet - Fix wrong variable for user and database filter. Patch by Vincent Picavet. - Change default regexp for user and db to be able to detect both. Patch by Vincent Picavet. - Fix false cur_date when using syslog and allow -b and -e options to work. Patch by Vincent Picavet. - Fix some case where logs where not detected as PostgreSQL log lines. - Added explanation for --begin and --end datetime setting. Patch by ragged. - Added -v / --version. Patch by ragged. - Fix usage information and presentation in README file. 2012-05-04 - version to 1.0 First public release of pgBadger. New feature: - Add graph of ckeckpoint Wal files usage (added, removed, recycled). - Add --image-format to allow the change of the default png image format to jpeg. - Allow download of all pie graphics as images. - Add --pie-limit to sum all data lower than this percentage limit to avoid label overlap. - Allow download of graphics as PNG images. - Replace GD::Graph by the Flotr2 javascript library to draw graphics. Patch by Guillaume Lelarge - Add pie graphs for session, database, user and host. 
Add a --quiet option to remove debug output and --progress to show a progress bar during log parsing - Add pie graph for Queries by type. - Add graph for checkpoint write buffer per hours - Allow log parsing without any log_line_prefix and extend it to be defined by the user. Custom log_line prefix can be parsed using user defined regex with command line option --regex-db and --regex-user. For exemple the default regex of pgbadger to parse user and db name from log_line_prefix can be written like this: pgbadger -l mylogfile.log --regex-user="user=([^,]*)," \ --regex-db="db=([^,]*)" - Separe log_line_prefix from log level part in the parser to extend log_line_prefix parsing - If there is just one argument, assume it is the logfile and use default value for all other parameters - Add autodetection of log format (syslog or stderr) if none is given with option -f - Add --outfile option to dump output to a file instead of stdout. Default filename is out.html or out.txt following the output format. To dump to stdout set filename to - - Add --version command line option to show current pgbadger version. Bug fix: - Rearrange x and y axis - Fix legend opacity on graphics - Rearrange Overall stats view - Add more "normalization" on errors messages - Fix samples error with normalyzed error instead of real error message - Fix an other average size of temporary file decimal limit - Force quiet mode when --progress is used - Fix per sessions graphs - Fix sort order of days/hours into hours array - Fix sort order of days into graphics - Remove display of locks, sessions and connections statistics when none are available - Fix display of empty column of checkpoint when no checkpoint was found in log file pgbadger-9.2/LICENSE000066400000000000000000000016071313662170200141510ustar00rootroot00000000000000Copyright (c) 2012-2017, Dalibo Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. IN NO EVENT SHALL Dalibo BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF Dalibo HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Dalibo SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND Dalibo HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. pgbadger-9.2/MANIFEST000066400000000000000000000001211313662170200142630ustar00rootroot00000000000000LICENSE Makefile.PL MANIFEST META.yml pgbadger README doc/pgBadger.pod ChangeLog pgbadger-9.2/META.yml000066400000000000000000000005161313662170200144130ustar00rootroot00000000000000# http://module-build.sourceforge.net/META-spec.html #XXXXXXX This is a prototype!!! It will change in the future!!! XXXXX# name: pgBadger version: 8.1 version_from: pgbadger installdirs: site recommends: Text::CSV_XS: 0 distribution_type: script generated_by: ExtUtils::MakeMaker version 6.17 pgbadger-9.2/Makefile.PL000066400000000000000000000025701313662170200151160ustar00rootroot00000000000000use ExtUtils::MakeMaker; # See lib/ExtUtils/MakeMaker.pm for details of how to influence # the contents of the Makefile that is written. 
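# Note: only the arguments listed in @ALLOWED_ARGS below (INSTALLDIRS and
# DESTDIR) are honoured when passed as KEY=value pairs on the command line,
# for example: perl Makefile.PL INSTALLDIRS=vendor
# Any other KEY=value argument is consumed and silently ignored.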
use strict; my @ALLOWED_ARGS = ('INSTALLDIRS','DESTDIR'); # Parse command line arguments and store them as environment variables while ($_ = shift) { my ($k,$v) = split(/=/, $_, 2); if (grep(/^$k$/, @ALLOWED_ARGS)) { $ENV{$k} = $v; } } $ENV{DESTDIR} =~ s/\/$//; # Default install path my $DESTDIR = $ENV{DESTDIR} || ''; my $INSTALLDIRS = $ENV{INSTALLDIRS} || 'site'; my %merge_compat = (); if ($ExtUtils::MakeMaker::VERSION >= 6.46) { %merge_compat = ( 'META_MERGE' => { resources => { homepage => 'http://projects.dalibo.org/pgbadger', repository => { type => 'git', git => 'git@github.com:dalibo/pgbadger.git', web => 'https://github.com/dalibo/pgbadger', }, }, } ); } WriteMakefile( 'DISTNAME' => 'pgbadger', 'NAME' => 'pgBadger', 'VERSION_FROM' => 'pgbadger', 'dist' => { 'COMPRESS'=>'gzip -9f', 'SUFFIX' => 'gz', 'ZIP'=>'/usr/bin/zip','ZIPFLAGS'=>'-rl' }, 'AUTHOR' => 'Gilles Darold (gilles@darold.net)', 'ABSTRACT' => 'pgBadger - PostgreSQL log analysis report', 'EXE_FILES' => [ qw(pgbadger) ], 'MAN1PODS' => { 'doc/pgBadger.pod' => 'blib/man1/pgbadger.1p' }, 'DESTDIR' => $DESTDIR, 'INSTALLDIRS' => $INSTALLDIRS, 'clean' => {}, %merge_compat ); pgbadger-9.2/README000066400000000000000000000741171313662170200140320ustar00rootroot00000000000000NAME pgBadger - a fast PostgreSQL log analysis report SYNOPSIS Usage: pgbadger [options] logfile [...] PostgreSQL log analyzer with fully detailed reports and graphs. Arguments: logfile can be a single log file, a list of files, or a shell command returning a list of files. If you want to pass log content from stdin use - as filename. Note that input from stdin will not work with csvlog. Options: -a | --average N : number of minutes to build the average graphs of queries and connections. Default 5 minutes. -A | --histo-average N : number of minutes to build the histogram graphs of queries. Default 60 minutes. -b | --begin datetime : start date/time for the data to be parsed in log. -B | --bar-graph : use bar graph instead of line by default. -c | --dbclient host : only report on entries for the given client host. -C | --nocomment : remove comments like /* ... */ from queries. -d | --dbname database : only report on entries for the given database. -D | --dns-resolv : client ip addresses are replaced by their DNS name. Be warned that this can really slow down pgBadger. -e | --end datetime : end date/time for the data to be parsed in log. -f | --format logtype : possible values: syslog, syslog2, stderr, csv and pgbouncer. Use this option when pgBadger is not able to auto-detect the log format Default: stderr. -G | --nograph : disable graphs on HTML output. Enabled by default. -h | --help : show this message and exit. -i | --ident name : programname used as syslog ident. Default: postgres -I | --incremental : use incremental mode, reports will be generated by days in a separate directory, --outdir must be set. -j | --jobs number : number of jobs to run at same time. Default is 1, run as single process. -J | --Jobs number : number of log file to parse in parallel. Default is 1, run as single process. -l | --last-parsed file: allow incremental log parsing by registering the last datetime and line parsed. Useful if you want to watch errors since last run or if you want one report per day with a log rotated each week. -L | logfile-list file : file containing a list of log file to parse. -m | --maxlength size : maximum length of a query, it will be restricted to the given size. 
Default: no truncate -M | --no-multiline : do not collect multiline statement to avoid garbage especially on errors that generate a huge report. -n | --nohighlight : disable SQL code highlighting. -N | --appname name : only report on entries for given application name -o | --outfile filename: define the filename for the output. Default depends on the output format: out.html, out.txt, out.bin, out.json or out.tsung. With module JSON::XS installed, you can output file in JSON format either. To dump output to stdout use - as filename. -O | --outdir path : directory where out file must be saved. -p | --prefix string : the value of your custom log_line_prefix as defined in your postgresql.conf. Only use it if you aren't using one of the standard prefixes specified in the pgBadger documentation, such as if your prefix includes additional variables like client ip or application name. See examples below. -P | --no-prettify : disable SQL queries prettify formatter. -q | --quiet : don't print anything to stdout, not even a progress bar. -r | --remote-host ip : set the host where to execute the cat command on remote logfile to parse locally the file. -R | --retention N : number of weeks to keep in incremental mode. Default to 0, disabled. Used to set the number of weeks to keep in output directory. Older weeks and days directory are automatically removed. -s | --sample number : number of query samples to store. Default: 3. -S | --select-only : only report SELECT queries. -t | --top number : number of queries to store/display. Default: 20. -T | --title string : change title of the HTML page report. -u | --dbuser username : only report on entries for the given user. -U | --exclude-user username : exclude entries for the specified user from report. -v | --verbose : enable verbose or debug mode. Disabled by default. -V | --version : show pgBadger version and exit. -w | --watch-mode : only report errors just like logwatch could do. -x | --extension : output format. Values: text, html, bin, json or tsung. Default: html -X | --extra-files : in incremental mode allow pgBadger to write CSS and JS files in the output directory as separate files. -z | --zcat exec_path : set the full path to the zcat program. Use it if zcat or bzcat or unzip is not in your path. -Z | --timezone +/-XX : Set the number of hours from GMT of the timezone. Use this to adjust date/time in JavaScript graphs. --pie-limit num : pie data lower than num% will show a sum instead. --exclude-query regex : any query matching the given regex will be excluded from the report. For example: "^(VACUUM|COMMIT)" You can use this option multiple times. --exclude-file filename: path of the file which contains all the regex to use to exclude queries from the report. One regex per line. --include-query regex : any query that does not match the given regex will be excluded from the report. You can use this option multiple times. For example: "(tbl1|tbl2)". --include-file filename: path of the file which contains all the regex of the queries to include from the report. One regex per line. --disable-error : do not generate error report. --disable-hourly : do not generate hourly report. --disable-type : do not generate report of queries by type, database or user. --disable-query : do not generate query reports (slowest, most frequent, queries by users, by database, ...). --disable-session : do not generate session report. --disable-connection : do not generate connection report. --disable-lock : do not generate lock report. 
--disable-temporary : do not generate temporary report. --disable-checkpoint : do not generate checkpoint/restartpoint report. --disable-autovacuum : do not generate autovacuum report. --charset : used to set the HTML charset to be used. Default: utf-8. --csv-separator : used to set the CSV field separator, default: , --exclude-time regex : any timestamp matching the given regex will be excluded from the report. Example: "2013-04-12 .*" You can use this option multiple times. --exclude-appname name : exclude entries for the specified application name from report. Example: "pg_dump". --exclude-line regex : pgBadger will start to exclude any log entry that will match the given regex. Can be used multiple time. --anonymize : obscure all literals in queries, useful to hide confidential data. --noreport : prevent pgBadger to create reports in incremental mode. --log-duration : force pgBadger to associate log entries generated by both log_duration = on and log_statement = 'all' --enable-checksum : used to add a md5 sum under each query report. --journalctl command : command to use to replace PostgreSQL logfile by a call to journalctl. Basically it might be: journalctl -u postgresql-9.5 --pid-dir dirpath : set the path of the directory where the pid file will be written to be able to run two pgBadger at the same time. --rebuild : used to rebuild all html reports in incremental output directories where there is binary data files. --pgbouncer-only : only show PgBouncer related menu in the header. --start-monday : in incremental mode, weeks start on sunday. Use this option to start on monday. --normalized-only : only dump all normalized query to out.txt pgBadger is able to parse a remote log file using a passwordless ssh connection. Use the -r or --remote-host to set the host ip address or hostname. There's also some additional options to fully control the ssh connection. --ssh-program ssh path to the ssh program to use. Default: ssh. --ssh-user username connection login name. Default to running user. --ssh-identity file path to the identity file to use. --ssh-timeout second timeout to ssh connection failure. Default 10 secs. --ssh-option options list of -o options to use for the ssh connection. 
Options always used: -o ConnectTimeout=$ssh_timeout -o PreferredAuthentications=hostbased,publickey Examples: pgbadger /var/log/postgresql.log pgbadger /var/log/postgres.log.2.gz /var/log/postgres.log.1.gz /var/log/postgres.log pgbadger /var/log/postgresql/postgresql-2012-05-* pgbadger --exclude-query="^(COPY|COMMIT)" /var/log/postgresql.log pgbadger -b "2012-06-25 10:56:11" -e "2012-06-25 10:59:11" /var/log/postgresql.log cat /var/log/postgres.log | pgbadger - # Log prefix with stderr log output pgbadger --prefix '%t [%p]: [%l-1] user=%u,db=%d,client=%h' /pglog/postgresql-2012-08-21* pgbadger --prefix '%m %u@%d %p %r %a : ' /pglog/postgresql.log # Log line prefix with syslog log output pgbadger --prefix 'user=%u,db=%d,client=%h,appname=%a' /pglog/postgresql-2012-08-21* # Use my 8 CPUs to parse my 10GB file faster, much faster pgbadger -j 8 /pglog/postgresql-9.1-main.log Generate Tsung sessions XML file with select queries only: pgbadger -S -o sessions.tsung --prefix '%t [%p]: [%l-1] user=%u,db=%d ' /pglog/postgresql-9.1.log Reporting errors every week by cron job: 30 23 * * 1 /usr/bin/pgbadger -q -w /var/log/postgresql.log -o /var/reports/pg_errors.html Generate report every week using incremental behavior: 0 4 * * 1 /usr/bin/pgbadger -q `find /var/log/ -mtime -7 -name "postgresql.log*"` -o /var/reports/pg_errors-`date +%F`.html -l /var/reports/pgbadger_incremental_file.dat This supposes that your log file and HTML report are also rotated every week. Or better, use the auto-generated incremental reports: 0 4 * * * /usr/bin/pgbadger -I -q /var/log/postgresql/postgresql.log.1 -O /var/www/pg_reports/ will generate a report per day and per week. In incremental mode, you can also specify the number of weeks to keep in the reports: /usr/bin/pgbadger --retention 2 -I -q /var/log/postgresql/postgresql.log.1 -O /var/www/pg_reports/ If you have a pg_dump at 23:00 and 13:00 each day during half an hour, you can use pgBadger as follows to exclude these periods from the report: pgbadger --exclude-time "2013-09-.* (23|13):.*" postgresql.log This will help avoid having COPY statements, as generated by pg_dump, on top of the list of slowest queries. You can also use --exclude-appname "pg_dump" to solve this problem in a simpler way. You can also parse journalctl output just as if it was a log file: pgbadger --journalctl 'journalctl -u postgresql-9.5' or worse, call it from a remote host: pgbadger -r 192.168.1.159 --journalctl 'journalctl -u postgresql-9.5' You don't need to specify any log file on the command line, but if you have other PostgreSQL log files to parse, you can add them as usual. To rebuild all incremental HTML reports afterwards, proceed as follows: rm /path/to/reports/*.js rm /path/to/reports/*.css pgbadger -X -I -O /path/to/reports/ --rebuild It will also update all resource files (JS and CSS). DESCRIPTION pgBadger is a PostgreSQL log analyzer built for speed with fully detailed reports from your PostgreSQL log file. It's a single and small Perl script that outperforms any other PostgreSQL log analyzer. It is written in pure Perl and uses a JavaScript library (flotr2) to draw graphs so that you don't need to install any additional Perl modules or other packages. Furthermore, this library gives us more features such as zooming. pgBadger also uses the Bootstrap JavaScript library and the FontAwesome webfont for better design. Everything is embedded. pgBadger is able to autodetect your log file format (syslog, stderr or csvlog). It is designed to parse huge log files as well as gzip compressed files.
See a complete list of features below. Supported compressed formats are gzip, bzip2 and xz. For the xz format you must have an xz version higher than 5.05 that supports the --robot option. All charts are zoomable and can be saved as PNG images. You can also limit pgBadger to only report errors or remove any part of the report using command line options. pgBadger supports any custom format set into the log_line_prefix directive of your postgresql.conf file as long as it at least specifies the %t and %p patterns. pgBadger allows parallel processing of a single log file or multiple files through the use of the -j option specifying the number of CPUs. If you want to save system performance you can also use log_duration instead of log_min_duration_statement to have reports on duration and number of queries only. FEATURE pgBadger reports everything about your SQL queries: Overall statistics. The most frequent waiting queries. Queries that waited the most. Queries generating the most temporary files. Queries generating the largest temporary files. The slowest queries. Queries that took up the most time. The most frequent queries. The most frequent errors. Histogram of query times. Histogram of sessions times. Users involved in top queries. Applications involved in top queries. Queries generating the most cancellations. Queries most cancelled. The following reports are also available with hourly charts divided into periods of five minutes: SQL queries statistics. Temporary file statistics. Checkpoints statistics. Autovacuum and autoanalyze statistics. Cancelled queries. Error events (panic, fatal, error and warning). Error class distribution. There are also some pie charts about distribution of: Locks statistics. Queries by type (select/insert/update/delete). Distribution of query types per database/application. Sessions per database/user/client/application. Connections per database/user/client/application. Autovacuum and autoanalyze per table. Queries per user and total duration per user. All charts are zoomable and can be saved as PNG images. SQL queries reported are highlighted and beautified automatically. pgBadger is also able to parse PgBouncer log files and to create the following reports: Request Throughput Bytes I/O Throughput Queries Average duration Simultaneous sessions Histogram of sessions times Sessions per database Sessions per user Sessions per host Established connections Connections per database Connections per user Connections per host Most used reserved pools Most Frequent Errors/Events You can also have incremental reports with one report per day and a cumulative report per week. Two multiprocess modes are available to speed up log parsing, one using one core per log file, and the second using multiple cores to parse a single file. These modes can be combined. Histogram granularity can be adjusted using the -A command line option. By default they will report the mean of each top query/error occurring per hour, but you can specify the granularity down to the minute. pgBadger can also be used in a central place to parse remote log files using a passwordless SSH connection. This mode can be used with compressed files and in the multiprocess per file mode (-J) but can not be used with the CSV log format. REQUIREMENT pgBadger comes as a single Perl script - you do not need anything other than a modern Perl distribution. Charts are rendered using a JavaScript library so you don't need anything other than a web browser. Your browser will do all the work.
If you plan to parse PostgreSQL CSV log files you might need some Perl modules: Text::CSV_XS - to parse PostgreSQL CSV log files. This module is optional, if you don't have PostgreSQL logs in CSV format you don't need to install it. If you want to export statistics as a JSON file you need an additional Perl module: JSON::XS - JSON serialising/deserialising, done correctly and fast. This module is optional, if you don't select the json output format you don't need to install it. Compressed log file format is autodetected from the file extension. If pgBadger finds a gz extension it will use the zcat utility, with a bz2 extension it will use bzcat and if the file extension is zip or xz then the unzip or xz utilities will be used. If those utilities are not found in the PATH environment variable then use the --zcat command line option to change this path. For example: --zcat="/usr/local/bin/gunzip -c" or --zcat="/usr/local/bin/bzip2 -dc" --zcat="C:\tools\unzip -p" By default pgBadger will use the zcat, bzcat and unzip utilities following the file extension. If you use the default compression format autodetection you can mix gz, bz2, xz or zip files. Specifying a custom value for the --zcat option will remove this support for mixed compression formats. Note that multiprocessing can not be used with compressed files or CSV files, nor under the Windows platform. INSTALLATION Download the tarball from GitHub and unpack the archive as follows: tar xzf pgbadger-7.x.tar.gz cd pgbadger-7.x/ perl Makefile.PL make && sudo make install This will copy the Perl script pgbadger to /usr/local/bin/pgbadger by default and the man page into /usr/local/share/man/man1/pgbadger.1. Those are the default installation directories for 'site' install. If you want to install everything under the /usr/ location, use INSTALLDIRS='perl' as an argument of Makefile.PL. The script will be installed into /usr/bin/pgbadger and the manpage into /usr/share/man/man1/pgbadger.1. For example, to install everything just like Debian does, proceed as follows: perl Makefile.PL INSTALLDIRS=vendor By default INSTALLDIRS is set to site. POSTGRESQL CONFIGURATION You must enable and set some configuration directives in your postgresql.conf before starting. You must first enable SQL query logging to have something to parse: log_min_duration_statement = 0 Here every statement will be logged, on a busy server you may want to increase this value to only log queries with a longer duration. Note that if you have log_statement set to 'all' nothing will be logged through the log_min_duration_statement directive. See the next chapter for more information. With 'stderr' log format, log_line_prefix must be at least: log_line_prefix = '%t [%p]: [%l-1] ' Log line prefix could add user, database name, application name and client ip address as follows: log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h ' or for syslog log file format: log_line_prefix = 'user=%u,db=%d,app=%a,client=%h ' Log line prefix for stderr output could also be: log_line_prefix = '%t [%p]: [%l-1] db=%d,user=%u,app=%a,client=%h ' or for syslog output: log_line_prefix = 'db=%d,user=%u,app=%a,client=%h ' You need to enable other parameters in postgresql.conf to get more information from your log files: log_checkpoints = on log_connections = on log_disconnections = on log_lock_waits = on log_temp_files = 0 log_autovacuum_min_duration = 0 log_error_verbosity = default Do not enable log_statement as its log format will not be parsed by pgBadger.
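To summarize this chapter, a minimal postgresql.conf logging setup combining the directives above might look like the following (these are the values suggested in this chapter; on a busy server you will probably want a higher log_min_duration_statement, as noted above):

    log_min_duration_statement = 0
    log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '
    log_checkpoints = on
    log_connections = on
    log_disconnections = on
    log_lock_waits = on
    log_temp_files = 0
    log_autovacuum_min_duration = 0
    log_error_verbosity = default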
Of course your log messages should be in English without locale support: lc_messages='C' but this is not only recommended by pgBadger. Note: the session line [%l-1] is just used to match the default prefix for "stderr". The -1 has no real purpose and basically is not used in pgBadger statistics / graphs. You can safely remove them from the log_line_prefix but you will need to set the --prefix command line option accordingly. log_min_duration_statement, log_duration and log_statement If you want the query statistics to include the actual query strings, you must set log_min_duration_statement to 0 or more milliseconds. If you just want to report duration and number of queries and don't want all details about queries, set log_min_duration_statement to -1 to disable it and enable log_duration in your postgresql.conf file. If you want to add the most common request report you can either choose to set log_min_duration_statement to a higher value or choose to enable log_statement. Enabling log_min_duration_statement will add reports about slowest queries and queries that took up the most time. Take care that if you have log_statement set to 'all' nothing will be logged with log_line_prefix. PARALLEL PROCESSING To enable parallel processing you just have to use the -j N option where N is the number of cores you want to use. pgBadger will then proceed as follows:

    for each log file
        chunk size = int(file size / N)
        look at start/end offsets of these chunks
        fork N processes and seek to the start offset of each chunk
        each process will terminate when the parser reaches the end offset of its chunk
        each process writes stats into a binary temporary file
        wait until all children have terminated

All binary temporary files generated will then be read and loaded into memory to build the HTML output. With that method, at start/end of chunks pgBadger may truncate or omit a maximum of N queries per log file which is an insignificant gap if you have millions of queries in your log file. The chance that the query that you were looking for is lost is near 0, this is why I think this gap is livable. Most of the time the query is counted twice but truncated. When you have many small log files and many CPUs it is speedier to dedicate one core to one log file at a time. To enable this behavior you have to use option -J N instead. With 200 log files of 10MB each the use of the -J option starts being really interesting with 8 cores. Using this method you will be sure not to lose any queries in the reports. Here is a benchmark done on a server with 8 CPUs and a single file of 9.5GB:

    Option |  1 CPU  | 2 CPU | 4 CPU | 8 CPU
    -------+---------+-------+-------+------
      -j   | 1h41m18 | 50m25 | 25m39 | 15m58
      -J   | 1h41m18 | 54m28 | 41m16 | 34m45

With 200 log files of 10MB each and a total of 2GB the results are slightly different:

    Option | 1 CPU | 2 CPU | 4 CPU | 8 CPU
    -------+-------+-------+-------+------
      -j   | 20m15 |  9m56 |  5m20 |  4m20
      -J   | 20m15 |  9m49 |  5m00 |  2m40

So it is recommended to use -j unless you have hundreds of small log files and can use at least 8 CPUs. IMPORTANT: when you are using parallel parsing pgBadger will generate a lot of temporary files in the /tmp directory and will remove them at the end, so do not remove those files while pgBadger is running. They are all named with the following template tmp_pgbadgerXXXX.bin so they can be easily identified. A minimal illustrative sketch of this chunking approach is shown below.
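To make the chunk-and-fork strategy described above more concrete, here is a minimal, self-contained Perl sketch of the same idea: compute line-aligned chunk offsets, fork one worker per chunk, and let each worker seek to its start offset and stop at its end offset. This is only an illustration (the workers simply count lines instead of building statistics) and is not code taken from pgBadger itself.

    #!/usr/bin/env perl
    # Illustration of the chunking strategy described above, not pgBadger's
    # actual implementation: each worker only counts the lines of its chunk.
    use strict;
    use warnings;

    my ($logfile, $ncores) = (shift @ARGV, shift(@ARGV) || 2);
    die "usage: $0 logfile [ncores]\n" unless defined $logfile && -f $logfile;

    my $size  = -s $logfile;
    my $chunk = int($size / $ncores) || 1;

    # Compute start/end offsets aligned on line boundaries so that no log
    # line is split between two processes.
    my @offsets = (0);
    open my $fh, '<', $logfile or die "cannot open $logfile: $!\n";
    for my $i (1 .. $ncores - 1) {
        seek($fh, $i * $chunk, 0);
        <$fh>;    # skip the partial line at the chunk boundary
        push @offsets, tell($fh);
    }
    push @offsets, $size;
    close $fh;

    # Fork one worker per chunk; each worker seeks to its start offset and
    # stops once it reaches its end offset.
    my @pids;
    for my $i (0 .. $ncores - 1) {
        my ($start, $end) = ($offsets[$i], $offsets[$i + 1]);
        my $pid = fork();
        die "fork failed: $!\n" unless defined $pid;
        if ($pid == 0) {
            open my $in, '<', $logfile or die "cannot open $logfile: $!\n";
            seek($in, $start, 0);
            my $count = 0;
            while (<$in>) {
                $count++;
                last if tell($in) >= $end;
            }
            close $in;
            # A real parser would dump its statistics into a binary
            # temporary file here; we just report what has been seen.
            print "worker $i parsed $count lines (offsets $start-$end)\n";
            exit 0;
        }
        push @pids, $pid;
    }

    # Wait until all children have terminated.
    waitpid($_, 0) for @pids;

INCREMENTAL REPORTS pgBadger includes an automatic incremental report mode using option -I or --incremental.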
When running in this mode, pgBadger will generate one report per day and a cumulative report per week. Output is first done in binary format into the mandatory output directory (see option -O or --outdir), then in HTML format for daily and weekly reports with a main index file. The main index file will show a dropdown menu per week with a link to each week report and links to daily reports of each week. For example, if you run pgBadger as follows based on a daily rotated file: 0 4 * * * /usr/bin/pgbadger -I -q /var/log/postgresql/postgresql.log.1 \ -O /var/www/pg_reports/ you will have all daily and weekly reports for the full running period. In this mode pgBadger will create an automatic incremental file in the output directory, so you don't have to use the -l option unless you want to change the path of that file. This means that you can run pgBadger in this mode each day on a log file rotated each week, and it will not count the log entries twice. To save disk space you may want to use the -X or --extra-files command line option to force pgBadger to write JavaScript and CSS to separate files in the output directory. The resources will then be loaded using script and link tags. BINARY FORMAT Using the binary format it is possible to create custom incremental and cumulative reports. For example, if you want to refresh a pgBadger report each hour from a daily PostgreSQL log file, you can proceed by running each hour the following commands: pgbadger --last-parsed .pgbadger_last_state_file -o sunday/hourX.bin /var/log/pgsql/postgresql-Sun.log to generate the incremental data files in binary format. And to generate the fresh HTML report from that binary file: pgbadger sunday/*.bin Or as another example, if you generate one log file per hour and you want reports to be rebuilt each time the log file is rotated, proceed as follows: pgbadger -o day1/hour01.bin /var/log/pgsql/pglog/postgresql-2012-03-23_10.log pgbadger -o day1/hour02.bin /var/log/pgsql/pglog/postgresql-2012-03-23_11.log pgbadger -o day1/hour03.bin /var/log/pgsql/pglog/postgresql-2012-03-23_12.log ... When you want to refresh the HTML report, for example each time after a new binary file is generated, just do the following: pgbadger -o day1_report.html day1/*.bin Adjust the commands to suit your particular needs. JSON FORMAT JSON format is good for sharing data with other languages, which makes it easy to integrate pgBadger result into other monitoring tools like Cacti or Graphite. AUTHORS pgBadger is an original work from Gilles Darold. The pgBadger logo is an original creation of Damien Clochard. The pgBadger v4.x design comes from the "Art is code" company. This web site is a work of Gilles Darold. pgBadger is maintained by Gilles Darold, the good folks at Dalibo, and every one who wants to contribute. Many people have contributed to pgBadger, they are all quoted in the Changelog file. LICENSE pgBadger is free software distributed under the PostgreSQL Licence. Copyright (c) 2012-2017, Dalibo A modified version of the SQL::Beautify Perl Module is embedded in pgBadger with copyright (C) 2009 by Jonas Kramer and is published under the terms of the Artistic License 2.0. pgbadger-9.2/doc/000077500000000000000000000000001313662170200137055ustar00rootroot00000000000000pgbadger-9.2/doc/pgBadger.pod000066400000000000000000000660531313662170200161360ustar00rootroot00000000000000=head1 NAME pgBadger - a fast PostgreSQL log analysis report =head1 SYNOPSIS Usage: pgbadger [options] logfile [...] 
PostgreSQL log analyzer with fully detailed reports and graphs. Arguments: logfile can be a single log file, a list of files, or a shell command returning a list of files. If you want to pass log content from stdin use - as filename. Note that input from stdin will not work with csvlog. Options: -a | --average N : number of minutes to build the average graphs of queries and connections. Default 5 minutes. -A | --histo-average N : number of minutes to build the histogram graphs of queries. Default 60 minutes. -b | --begin datetime : start date/time for the data to be parsed in log. -B | --bar-graph : use bar graph instead of line by default. -c | --dbclient host : only report on entries for the given client host. -C | --nocomment : remove comments like /* ... */ from queries. -d | --dbname database : only report on entries for the given database. -D | --dns-resolv : client ip addresses are replaced by their DNS name. Be warned that this can really slow down pgBadger. -e | --end datetime : end date/time for the data to be parsed in log. -f | --format logtype : possible values: syslog, syslog2, stderr, csv and pgbouncer. Use this option when pgBadger is not able to auto-detect the log format Default: stderr. -G | --nograph : disable graphs on HTML output. Enabled by default. -h | --help : show this message and exit. -i | --ident name : programname used as syslog ident. Default: postgres -I | --incremental : use incremental mode, reports will be generated by days in a separate directory, --outdir must be set. -j | --jobs number : number of jobs to run at same time. Default is 1, run as single process. -J | --Jobs number : number of log file to parse in parallel. Default is 1, run as single process. -l | --last-parsed file: allow incremental log parsing by registering the last datetime and line parsed. Useful if you want to watch errors since last run or if you want one report per day with a log rotated each week. -L | logfile-list file : file containing a list of log file to parse. -m | --maxlength size : maximum length of a query, it will be restricted to the given size. Default: no truncate -M | --no-multiline : do not collect multiline statement to avoid garbage especially on errors that generate a huge report. -n | --nohighlight : disable SQL code highlighting. -N | --appname name : only report on entries for given application name -o | --outfile filename: define the filename for the output. Default depends on the output format: out.html, out.txt, out.bin, out.json or out.tsung. With module JSON::XS installed, you can output file in JSON format either. To dump output to stdout use - as filename. -O | --outdir path : directory where out file must be saved. -p | --prefix string : the value of your custom log_line_prefix as defined in your postgresql.conf. Only use it if you aren't using one of the standard prefixes specified in the pgBadger documentation, such as if your prefix includes additional variables like client ip or application name. See examples below. -P | --no-prettify : disable SQL queries prettify formatter. -q | --quiet : don't print anything to stdout, not even a progress bar. -r | --remote-host ip : set the host where to execute the cat command on remote logfile to parse locally the file. -R | --retention N : number of weeks to keep in incremental mode. Default to 0, disabled. Used to set the number of weeks to keep in output directory. Older weeks and days directory are automatically removed. -s | --sample number : number of query samples to store. Default: 3. 
-S | --select-only : only report SELECT queries. -t | --top number : number of queries to store/display. Default: 20. -T | --title string : change title of the HTML page report. -u | --dbuser username : only report on entries for the given user. -U | --exclude-user username : exclude entries for the specified user from report. -v | --verbose : enable verbose or debug mode. Disabled by default. -V | --version : show pgBadger version and exit. -w | --watch-mode : only report errors just like logwatch could do. -x | --extension : output format. Values: text, html, bin, json or tsung. Default: html -X | --extra-files : in incremental mode allow pgBadger to write CSS and JS files in the output directory as separate files. -z | --zcat exec_path : set the full path to the zcat program. Use it if zcat or bzcat or unzip is not in your path. -Z | --timezone +/-XX : Set the number of hours from GMT of the timezone. Use this to adjust date/time in JavaScript graphs. --pie-limit num : pie data lower than num% will show a sum instead. --exclude-query regex : any query matching the given regex will be excluded from the report. For example: "^(VACUUM|COMMIT)" You can use this option multiple times. --exclude-file filename: path of the file which contains all the regex to use to exclude queries from the report. One regex per line. --include-query regex : any query that does not match the given regex will be excluded from the report. You can use this option multiple times. For example: "(tbl1|tbl2)". --include-file filename: path of the file which contains all the regex of the queries to include from the report. One regex per line. --disable-error : do not generate error report. --disable-hourly : do not generate hourly report. --disable-type : do not generate report of queries by type, database or user. --disable-query : do not generate query reports (slowest, most frequent, queries by users, by database, ...). --disable-session : do not generate session report. --disable-connection : do not generate connection report. --disable-lock : do not generate lock report. --disable-temporary : do not generate temporary report. --disable-checkpoint : do not generate checkpoint/restartpoint report. --disable-autovacuum : do not generate autovacuum report. --charset : used to set the HTML charset to be used. Default: utf-8. --csv-separator : used to set the CSV field separator, default: , --exclude-time regex : any timestamp matching the given regex will be excluded from the report. Example: "2013-04-12 .*" You can use this option multiple times. --exclude-appname name : exclude entries for the specified application name from report. Example: "pg_dump". --exclude-line regex : pgBadger will start to exclude any log entry that will match the given regex. Can be used multiple time. --anonymize : obscure all literals in queries, useful to hide confidential data. --noreport : prevent pgBadger to create reports in incremental mode. --log-duration : force pgBadger to associate log entries generated by both log_duration = on and log_statement = 'all' --enable-checksum : used to add a md5 sum under each query report. --journalctl command : command to use to replace PostgreSQL logfile by a call to journalctl. Basically it might be: journalctl -u postgresql-9.5 --pid-dir dirpath : set the path of the directory where the pid file will be written to be able to run two pgBadger at the same time. --rebuild : used to rebuild all html reports in incremental output directories where there is binary data files. 
--pgbouncer-only : only show PgBouncer related menu in the header. --start-monday : in incremental mode, weeks start on sunday. Use this option to start on monday. --normalized-only : only dump all normalized query to out.txt pgBadger is able to parse a remote log file using a passwordless ssh connection. Use the -r or --remote-host to set the host ip address or hostname. There's also some additional options to fully control the ssh connection. --ssh-program ssh path to the ssh program to use. Default: ssh. --ssh-user username connection login name. Default to running user. --ssh-identity file path to the identity file to use. --ssh-timeout second timeout to ssh connection failure. Default 10 secs. --ssh-option options list of -o options to use for the ssh connection. Options always used: -o ConnectTimeout=$ssh_timeout -o PreferredAuthentications=hostbased,publickey Examples: pgbadger /var/log/postgresql.log pgbadger /var/log/postgres.log.2.gz /var/log/postgres.log.1.gz /var/log/postgres.log pgbadger /var/log/postgresql/postgresql-2012-05-* pgbadger --exclude-query="^(COPY|COMMIT)" /var/log/postgresql.log pgbadger -b "2012-06-25 10:56:11" -e "2012-06-25 10:59:11" /var/log/postgresql.log cat /var/log/postgres.log | pgbadger - # Log prefix with stderr log output pgbadger --prefix '%t [%p]: [%l-1] user=%u,db=%d,client=%h' /pglog/postgresql-2012-08-21* pgbadger --prefix '%m %u@%d %p %r %a : ' /pglog/postgresql.log # Log line prefix with syslog log output pgbadger --prefix 'user=%u,db=%d,client=%h,appname=%a' /pglog/postgresql-2012-08-21* # Use my 8 CPUs to parse my 10GB file faster, much faster pgbadger -j 8 /pglog/postgresql-9.1-main.log Generate Tsung sessions XML file with select queries only: pgbadger -S -o sessions.tsung --prefix '%t [%p]: [%l-1] user=%u,db=%d ' /pglog/postgresql-9.1.log Reporting errors every week by cron job: 30 23 * * 1 /usr/bin/pgbadger -q -w /var/log/postgresql.log -o /var/reports/pg_errors.html Generate report every week using incremental behavior: 0 4 * * 1 /usr/bin/pgbadger -q `find /var/log/ -mtime -7 -name "postgresql.log*"` -o /var/reports/pg_errors-`date +%F`.html -l /var/reports/pgbadger_incremental_file.dat This supposes that your log file and HTML report are also rotated every week. Or better, use the auto-generated incremental reports: 0 4 * * * /usr/bin/pgbadger -I -q /var/log/postgresql/postgresql.log.1 -O /var/www/pg_reports/ will generate a report per day and per week. In incremental mode, you can also specify the number of week to keep in the reports: /usr/bin/pgbadger --retention 2 -I -q /var/log/postgresql/postgresql.log.1 -O /var/www/pg_reports/ If you have a pg_dump at 23:00 and 13:00 each day during half an hour, you can use pgBadger as follow to exclude these period from the report: pgbadger --exclude-time "2013-09-.* (23|13):.*" postgresql.log This will help avoid having COPY statements, as generated by pg_dump, on top of the list of slowest queries. You can also use --exclude-appname "pg_dump" to solve this problem in a simpler way. You can also parse journalctl output just as if it was a log file: pgbadger --journalctl 'journalctl -u postgresql-9.5' or worst, call it from a remote host: pgbadger -r 192.168.1.159 --journalctl 'journalctl -u postgresql-9.5' you don't need to specify any log file at command line, but if you have others PostgreSQL log files to parse, you can add them as usual. 
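Remote parsing works the same way for ordinary and compressed log files. For example, to build a report from a gzipped log located on another host over the passwordless ssh connection (host name and path are only illustrative):

    pgbadger -r dbserver.example.com /var/log/postgresql/postgresql-9.5-main.log.gz -o remote_report.html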
To rebuild all incremental html reports after, proceed as follow: rm /path/to/reports/*.js rm /path/to/reports/*.css pgbadger -X -I -O /path/to/reports/ --rebuild it will also update all resource files (JS and CSS). =head1 DESCRIPTION pgBadger is a PostgreSQL log analyzer built for speed with fully reports from your PostgreSQL log file. It's a single and small Perl script that outperforms any other PostgreSQL log analyzer. It is written in pure Perl and uses a JavaScript library (flotr2) to draw graphs so that you don't need to install any additional Perl modules or other packages. Furthermore, this library gives us more features such as zooming. pgBadger also uses the Bootstrap JavaScript library and the FontAwesome webfont for better design. Everything is embedded. pgBadger is able to autodetect your log file format (syslog, stderr or csvlog). It is designed to parse huge log files as well as gzip compressed files. See a complete list of features below. Supported compressed format are gzip, bzip2 and xz. For the xz format you must have an xz version upper than 5.05 that supports the --robot option. All charts are zoomable and can be saved as PNG images. You can also limit pgBadger to only report errors or remove any part of the report using command line options. pgBadger supports any custom format set into the log_line_prefix directive of your postgresql.conf file as long as it at least specify the %t and %p patterns. pgBadger allows parallel processing of a single log file or multiple files through the use of the -j option specifying the number of CPUs. If you want to save system performance you can also use log_duration instead of log_min_duration_statement to have reports on duration and number of queries only. =head1 FEATURE pgBadger reports everything about your SQL queries: Overall statistics. The most frequent waiting queries. Queries that waited the most. Queries generating the most temporary files. Queries generating the largest temporary files. The slowest queries. Queries that took up the most time. The most frequent queries. The most frequent errors. Histogram of query times. Histogram of sessions times. Users involved in top queries. Applications involved in top queries. Queries generating the most cancellation. Queries most cancelled. The following reports are also available with hourly charts divided into periods of five minutes: SQL queries statistics. Temporary file statistics. Checkpoints statistics. Autovacuum and autoanalyze statistics. Cancelled queries. Error events (panic, fatal, error and warning). Error class distribution. There are also some pie charts about distribution of: Locks statistics. Queries by type (select/insert/update/delete). Distribution of queries type per database/application Sessions per database/user/client/application. Connections per database/user/client/application. Autovacuum and autoanalyze per table. Queries per user and total duration per user. All charts are zoomable and can be saved as PNG images. SQL queries reported are highlighted and beautified automatically. 
pgBadger is also able to parse PgBouncer log files and to create the following reports: Request Throughput Bytes I/O Throughput Queries Average duration Simultaneous sessions Histogram of sessions times Sessions per database Sessions per user Sessions per host Established connections Connections per database Connections per user Connections per host Most used reserved pools Most Frequent Errors/Events You can also have incremental reports with one report per day and a cumulative report per week. Two multiprocess modes are available to speed up log parsing, one using one core per log file, and the second using multiple cores to parse a single file. These modes can be combined. Histogram granularity can be adjusted using the -A command line option. By default they will report the mean of each top queries/errors occurring per hour, but you can specify the granularity down to the minute. pgBadger can also be used in a central place to parse remote log files using a passwordless SSH connection. This mode can be used with compressed files and in the multiprocess per file mode (-J) but can not be used with the CSV log format. =head1 REQUIREMENT pgBadger comes as a single Perl script - you do not need anything other than a modern Perl distribution. Charts are rendered using a JavaScript library so you don't need anything other than a web browser. Your browser will do all the work. If you planned to parse PostgreSQL CSV log files you might need some Perl Modules: Text::CSV_XS - to parse PostgreSQL CSV log files. This module is optional, if you don't have PostgreSQL log in the CSV format you don't need to install it. If you want to export statistics as JSON file you need an additional Perl module: JSON::XS - JSON serialising/deserialising, done correctly and fast This module is optional, if you don't select the json output format you don't need to install it. Compressed log file format is autodetected from the file extension. If pgBadger find a gz extension it will use the zcat utility, with a bz2 extension it will use bzcat and if the file extension is zip or xz then the unzip or xz utilities will be used. If those utilities are not found in the PATH environment variable then use the --zcat command line option to change this path. For example: --zcat="/usr/local/bin/gunzip -c" or --zcat="/usr/local/bin/bzip2 -dc" --zcat="C:\tools\unzip -p" By default pgBadger will use the zcat, bzcat and unzip utilities following the file extension. If you use the default autodetection compress format you can mixed gz, bz2, xz or zip files. Specifying a custom value to --zcat option will remove this feature of mixed compressed format. Note that multiprocessing can not be used with compressed files or CSV files as well as under Windows platform. =head1 INSTALLATION Download the tarball from GitHub and unpack the archive as follow: tar xzf pgbadger-7.x.tar.gz cd pgbadger-7.x/ perl Makefile.PL make && sudo make install This will copy the Perl script pgbadger to /usr/local/bin/pgbadger by default and the man page into /usr/local/share/man/man1/pgbadger.1. Those are the default installation directories for 'site' install. If you want to install all under /usr/ location, use INSTALLDIRS='perl' as an argument of Makefile.PL. The script will be installed into /usr/bin/pgbadger and the manpage into /usr/share/man/man1/pgbadger.1. For example, to install everything just like Debian does, proceed as follows: perl Makefile.PL INSTALLDIRS=vendor By default INSTALLDIRS is set to site. 
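Once installed (or even run directly from the unpacked source directory, since pgBadger is a single Perl script), a quick way to check that the script is found in your PATH is to ask for its version:

    pgbadger --version

which should simply print the version string, for example: pgBadger version 9.2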
=head1 POSTGRESQL CONFIGURATION You must enable and set some configuration directives in your postgresql.conf before starting. You must first enable SQL query logging to have something to parse: log_min_duration_statement = 0 Here every statement will be logged, on a busy server you may want to increase this value to only log queries with a longer duration. Note that if you have log_statement set to 'all' nothing will be logged through the log_min_duration_statement directive. See the next chapter for more information. With 'stderr' log format, log_line_prefix must be at least: log_line_prefix = '%t [%p]: [%l-1] ' Log line prefix could add user, database name, application name and client ip address as follows: log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h ' or for syslog log file format: log_line_prefix = 'user=%u,db=%d,app=%aclient=%h ' Log line prefix for stderr output could also be: log_line_prefix = '%t [%p]: [%l-1] db=%d,user=%u,app=%a,client=%h ' or for syslog output: log_line_prefix = 'db=%d,user=%u,app=%a,client=%h ' You need to enable other parameters in postgresql.conf to get more information from your log files: log_checkpoints = on log_connections = on log_disconnections = on log_lock_waits = on log_temp_files = 0 log_autovacuum_min_duration = 0 log_error_verbosity = default Do not enable log_statement as its log format will not be parsed by pgBadger. Of course your log messages should be in English without locale support: lc_messages='C' but this is not only recommended by pgBadger. Note: the session line [%l-1] is just used to match the default prefix for "stderr". The -1 has no real purpose and basically is not used in pgBadger statistics / graphs. You can safely remove them from the log_line_prefix but you will need to set the --prefix command line option accordingly. =head1 log_min_duration_statement, log_duration and log_statement If you want the query statistics to include the actual query strings, you must set log_min_duration_statement to 0 or more milliseconds. If you just want to report duration and number of queries and don't want all details about queries, set log_min_duration_statement to -1 to disable it and enable log_duration in your postgresql.conf file. If you want to add the most common request report you can either choose to set log_min_duration_statement to a higher value or choose to enable log_statement. Enabling log_min_duration_statement will add reports about slowest queries and queries that took up the most time. Take care that if you have log_statement set to 'all' nothing will be logged with log_line_prefix. =head1 PARALLEL PROCESSING To enable parallel processing you just have to use the -j N option where N is the number of cores you want to use. pgBadger will then proceed as follow: for each log file chunk size = int(file size / N) look at start/end offsets of these chunks fork N processes and seek to the start offset of each chunk each process will terminate when the parser reach the end offset of its chunk each process write stats into a binary temporary file wait for all children has terminated All binary temporary files generated will then be read and loaded into memory to build the html output. With that method, at start/end of chunks pgBadger may truncate or omit a maximum of N queries per log file which is an insignificant gap if you have millions of queries in your log file. The chance that the query that you were looking for is lost is near 0, this is why I think this gap is livable. 
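To make the chunk splitting described above more concrete, here is a small standalone Perl sketch of the idea. It is only an illustration under simplified assumptions (the names are invented and error handling is minimal); it is not pgBadger's actual code:

    #!/usr/bin/env perl
    use strict;
    use warnings;

    # Split a log file into $n byte ranges, re-aligned on line boundaries,
    # so that each worker only ever parses whole lines.
    sub chunk_offsets
    {
        my ($file, $n) = @_;
        my $size = -s $file;
        die "cannot stat $file\n" unless (defined $size && $size > 0);
        my $step = int($size / $n);
        open(my $fh, '<', $file) or die "cannot open $file: $!\n";
        my @bounds = (0);
        for my $i (1 .. $n - 1) {
            seek($fh, $i * $step, 0);     # jump near the theoretical boundary
            my $partial = <$fh>;          # discard the partial line we landed on
            push(@bounds, tell($fh));     # the next chunk starts on a fresh line
        }
        close($fh);
        push(@bounds, $size);
        return @bounds;
    }

    # Each child process would then only parse lines between two boundaries.
    my @bounds = chunk_offsets($ARGV[0] || 'postgresql.log', 4);
    print join(' ', @bounds), "\n";

Each forked worker then parses only the byte range between two consecutive boundaries, which is why at most a few queries per chunk boundary can be truncated or counted twice, as explained above.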
Most of the time the query is counted twice, but truncated.

When you have many small log files and many CPUs it is speedier to dedicate one core to one log file at a time. To enable this behavior you have to use option -J N instead. With 200 log files of 10MB each, the use of the -J option starts being really interesting with 8 cores. Using this method you will be sure not to lose any queries in the reports.

Here is a benchmark done on a server with 8 CPUs and a single file of 9.5GB:

    Option  |  1 CPU  | 2 CPU | 4 CPU | 8 CPU
    --------+---------+-------+-------+------
       -j   | 1h41m18 | 50m25 | 25m39 | 15m58
       -J   | 1h41m18 | 54m28 | 41m16 | 34m45

With 200 log files of 10MB each and a total of 2GB the results are slightly different:

    Option  | 1 CPU | 2 CPU | 4 CPU | 8 CPU
    --------+-------+-------+-------+------
       -j   | 20m15 |  9m56 |  5m20 |  4m20
       -J   | 20m15 |  9m49 |  5m00 |  2m40

So it is recommended to use -j unless you have hundreds of small log files and can use at least 8 CPUs.

IMPORTANT: when you are using parallel parsing pgBadger will generate a lot of temporary files in the /tmp directory and will remove them at the end, so do not remove those files while pgBadger is running. They are all named with the following template tmp_pgbadgerXXXX.bin so they can be easily identified.

=head1 INCREMENTAL REPORTS

pgBadger includes an automatic incremental report mode using option -I or --incremental.

When running in this mode, pgBadger will generate one report per day and a cumulative report per week. Output is first done in binary format into the mandatory output directory (see option -O or --outdir), then in HTML format for daily and weekly reports with a main index file.

The main index file will show a dropdown menu per week with a link to each week's report and links to daily reports of each week.

For example, if you run pgBadger as follows based on a daily rotated file:

    0 4 * * * /usr/bin/pgbadger -I -q /var/log/postgresql/postgresql.log.1 \
       -O /var/www/pg_reports/

you will have all daily and weekly reports for the full running period.

In this mode pgBadger will create an automatic incremental file in the output directory, so you don't have to use the -l option unless you want to change the path of that file. This means that you can run pgBadger in this mode each day on a log file rotated each week, and it will not count the log entries twice.

To save disk space you may want to use the -X or --extra-files command line option to force pgBadger to write JavaScript and CSS to separate files in the output directory. The resources will then be loaded using script and link tags.

=head1 BINARY FORMAT

Using the binary format it is possible to create custom incremental and cumulative reports. For example, if you want to refresh a pgBadger report each hour from a daily PostgreSQL log file, you can proceed by running the following commands each hour:

    pgbadger --last-parsed .pgbadger_last_state_file -o sunday/hourX.bin /var/log/pgsql/postgresql-Sun.log

to generate the incremental data files in binary format. And to generate the fresh HTML report from that binary file:

    pgbadger sunday/*.bin

Or as another example, if you generate one log file per hour and you want reports to be rebuilt each time the log file is rotated, proceed as follows:

    pgbadger -o day1/hour01.bin /var/log/pgsql/pglog/postgresql-2012-03-23_10.log
    pgbadger -o day1/hour02.bin /var/log/pgsql/pglog/postgresql-2012-03-23_11.log
    pgbadger -o day1/hour03.bin /var/log/pgsql/pglog/postgresql-2012-03-23_12.log
    ...
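If the hourly files follow a predictable naming scheme like the one above, generating the per-hour binary files can of course be scripted; for example with a small shell loop (paths and naming are only illustrative):

    for f in /var/log/pgsql/pglog/postgresql-2012-03-23_*.log
    do
        h=${f##*_}        # keep what follows the last underscore, e.g. "10.log"
        h=${h%.log}       # strip the .log suffix to get the hour
        pgbadger -o day1/hour$h.bin "$f"
    done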
When you want to refresh the HTML report, for example each time after a new binary file is generated, just do the following: pgbadger -o day1_report.html day1/*.bin Adjust the commands to suit your particular needs. =head1 JSON FORMAT JSON format is good for sharing data with other languages, which makes it easy to integrate pgBadger result into other monitoring tools like Cacti or Graphite. =head1 AUTHORS pgBadger is an original work from Gilles Darold. The pgBadger logo is an original creation of Damien Clochard. The pgBadger v4.x design comes from the "Art is code" company. This web site is a work of Gilles Darold. pgBadger is maintained by Gilles Darold, the good folks at Dalibo, and every one who wants to contribute. Many people have contributed to pgBadger, they are all quoted in the Changelog file. =head1 LICENSE pgBadger is free software distributed under the PostgreSQL Licence. Copyright (c) 2012-2017, Dalibo A modified version of the SQL::Beautify Perl Module is embedded in pgBadger with copyright (C) 2009 by Jonas Kramer and is published under the terms of the Artistic License 2.0. pgbadger-9.2/pgbadger000066400000000000000000046254671313662170200146660ustar00rootroot00000000000000#!/usr/bin/env perl #------------------------------------------------------------------------------ # # pgBadger - Advanced PostgreSQL log analyzer # # This program is open source, licensed under the PostgreSQL Licence. # For license terms, see the LICENSE file. #------------------------------------------------------------------------------ # # Settings in postgresql.conf # # You should enable SQL query logging with log_min_duration_statement >= 0 # With stderr output # Log line prefix should be: log_line_prefix = '%t [%p]: [%l-1] ' # Log line prefix should be: log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d ' # Log line prefix should be: log_line_prefix = '%t [%p]: [%l-1] db=%d,user=%u ' # If you need report per client Ip adresses you can add client=%h or remote=%h # pgbadger will also recognized the following form: # log_line_prefix = '%t [%p]: [%l-1] db=%d,user=%u,client=%h ' # or # log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d,remote=%h ' # With syslog output # Log line prefix should be: log_line_prefix = 'db=%d,user=%u ' # # Additional information that could be collected and reported # log_checkpoints = on # log_connections = on # log_disconnections = on # log_lock_waits = on # log_temp_files = 0 # log_autovacuum_min_duration = 0 #------------------------------------------------------------------------------ use vars qw($VERSION); use strict qw(vars subs); use Getopt::Long qw(:config no_ignore_case bundling); use IO::File; use Benchmark; use File::Basename; use Storable qw(store_fd fd_retrieve); use Time::Local 'timegm_nocheck'; use POSIX qw(locale_h sys_wait_h _exit strftime); setlocale(LC_NUMERIC, ''); setlocale(LC_ALL, 'C'); use File::Spec qw/ tmpdir /; use File::Temp qw/ tempfile /; use IO::Handle; use IO::Pipe; use FileHandle; use Socket; use constant EBCDIC => "\t" ne "\011"; $VERSION = '9.2'; $SIG{'CHLD'} = 'DEFAULT'; my $TMP_DIR = File::Spec->tmpdir() || '/tmp'; my %RUNNING_PIDS = (); my @tempfiles = (); my $parent_pid = $$; my $interrupt = 0; my $tmp_last_parsed = ''; my @SQL_ACTION = ('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'COPY FROM', 'COPY TO', 'CTE', 'DDL', 'TCL'); my @LATENCY_PERCENTILE = sort {$a <=> $b} (99,95,90); my $graphid = 1; my $NODATA = '
NO DATASET
'; my $MAX_QUERY_LENGTH = 20480; my $terminate = 0; my %CACHE_DNS = (); my $DNSLookupTimeout = 1; # (in seconds) my $EXPLAIN_URL = 'http://explain.depesz.com/?is_public=0&is_anon=0&plan='; my $PID_DIR = $TMP_DIR; my @E2A = ( 0, 1, 2, 3,156, 9,134,127,151,141,142, 11, 12, 13, 14, 15, 16, 17, 18, 19,157, 10, 8,135, 24, 25,146,143, 28, 29, 30, 31, 128,129,130,131,132,133, 23, 27,136,137,138,139,140, 5, 6, 7, 144,145, 22,147,148,149,150, 4,152,153,154,155, 20, 21,158, 26, 32,160,226,228,224,225,227,229,231,241,162, 46, 60, 40, 43,124, 38,233,234,235,232,237,238,239,236,223, 33, 36, 42, 41, 59, 94, 45, 47,194,196,192,193,195,197,199,209,166, 44, 37, 95, 62, 63, 248,201,202,203,200,205,206,207,204, 96, 58, 35, 64, 39, 61, 34, 216, 97, 98, 99,100,101,102,103,104,105,171,187,240,253,254,177, 176,106,107,108,109,110,111,112,113,114,170,186,230,184,198,164, 181,126,115,116,117,118,119,120,121,122,161,191,208, 91,222,174, 172,163,165,183,169,167,182,188,189,190,221,168,175, 93,180,215, 123, 65, 66, 67, 68, 69, 70, 71, 72, 73,173,244,246,242,243,245, 125, 74, 75, 76, 77, 78, 79, 80, 81, 82,185,251,252,249,250,255, 92,247, 83, 84, 85, 86, 87, 88, 89, 90,178,212,214,210,211,213, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,179,219,220,217,218,159 ); if (EBCDIC && ord('^') == 106) { # as in the BS2000 posix-bc coded character set $E2A[74] = 96; $E2A[95] = 159; $E2A[106] = 94; $E2A[121] = 168; $E2A[161] = 175; $E2A[173] = 221; $E2A[176] = 162; $E2A[186] = 172; $E2A[187] = 91; $E2A[188] = 92; $E2A[192] = 249; $E2A[208] = 166; $E2A[221] = 219; $E2A[224] = 217; $E2A[251] = 123; $E2A[253] = 125; $E2A[255] = 126; } elsif (EBCDIC && ord('^') == 176) { # as in codepage 037 on os400 $E2A[21] = 133; $E2A[37] = 10; $E2A[95] = 172; $E2A[173] = 221; $E2A[176] = 94; $E2A[186] = 91; $E2A[187] = 93; $E2A[189] = 168; } my $pgbadger_logo = ''; my $pgbadger_ico = 'data:image/x-icon;base64, AAABAAEAIyMQAAEABAA8BAAAFgAAACgAAAAjAAAARgAAAAEABAAAAAAAAAAAAAAAAAAAAAAAAAAA AAAAAAAAAgAAGRsZACgqKQA2OTcASEpJAFpdWwBoa2kAeHt5AImMigCeoZ8AsLOxAMTHxQDR1NIA 5enmAPv+/AAAAAAA///////////////////////wAAD///////////H///////////AAAP////// //9Fq7Yv////////8AAA////////8V7u7qD////////wAAD///////8B7qWN5AL///////AAAP// ///y8Avrc3rtMCH/////8AAA/////xABvbAAAJ6kAA/////wAAD////wAG5tQAAADp6RAP////AA AP//MQBd7C2lRESOWe5xAD//8AAA//8APO7iC+7e7u4A3uxwBf/wAAD/9Aju7iAAvu7u0QAN7ukA 7/AAAP/wCe7kAAAF7ugAAAHO6xD/8AAA//AK7CAAAAHO1AAAABnrEP/wAAD/8ArAAAAAAc7kAAAA AIwQ//AAAP/wCjAAAAAC3uQAAAAAHBCf8AAA//AIEBVnIATu5gAXZhAFEP/wAAD/8AIAqxdwBu7p AFoX0QIQ//AAAP/wAAPsBCAL7u4QBwfmAAD/8AAA//AAA8owAC7u7lAAKbYAAJ/wAAD/8AAAAAAA fu7uwAAAAAAA//AAAP/wAAAAAADu7u7jAAAAAAD/8AAA//AAAAAABe7u7uoAAAAAAP/wAAD/8AAA AAAL7u7u7QAAAAAAn/AAAP/wAAAAAB3u7u7uYAAAAAD/8AAA//MAAAAATu7u7u6QAAAAAP/wAAD/ /wAAAAAM7u7u7TAAAAAD//AAAP//IQAAAAKu7u7UAAAAAB//8AAA////IAAAAAju7BAAAAAP///w AAD////2AAA1je7ulUAAA/////AAAP/////xEAnO7u7pIAH/////8AAA//////9CABju6iACP/// ///wAAD////////wAAggAP////////AAAP////////8wAAA/////////8AAA///////////w//// ///////wAAD///////////////////////AAAP/////gAAAA//+//+AAAAD//Af/4AAAAP/4A//g AAAA//AA/+AAAAD/oAA/4AAAAP8AAB/gAAAA/gAAD+AAAADwAAAB4AAAAPAAAADgAAAA4AAAAGAA AADgAAAA4AAAAOAAAADgAAAA4AAAAOAAAADgAAAAYAAAAOAAAADgAAAA4AAAAOAAAADgAAAA4AAA AOAAAABgAAAA4AAAAOAAAADgAAAA4AAAAOAAAADgAAAA4AAAAGAAAADgAAAA4AAAAOAAAADgAAAA 8AAAAOAAAADwAAAB4AAAAPwAAAfgAAAA/gAAD+AAAAD/gAA/4AAAAP/AAH/gAAAA//gD/+AAAAD/ /Af/4AAAAP//v//gAAAA/////+AAAAA '; my %CLASS_ERROR_CODE = ( '00' => 'Successful Completion', '01' => 'Warning', '02' => 'No Data (this is also a warning class per the SQL standard)', '03' => 'SQL Statement 
Not Yet Complete', '08' => 'Connection Exception', '09' => 'Triggered Action Exception', '0A' => 'Feature Not Supported', '0B' => 'Invalid Transaction Initiation', '0F' => 'Locator Exception', '0L' => 'Invalid Grantor', '0P' => 'Invalid Role Specification', '0Z' => 'Diagnostics Exception', '20' => 'Case Not Found', '21' => 'Cardinality Violation', '22' => 'Data Exception', '23' => 'Integrity Constraint Violation', '24' => 'Invalid Cursor State', '25' => 'Invalid Transaction State', '26' => 'Invalid SQL Statement Name', '27' => 'Triggered Data Change Violation', '28' => 'Invalid Authorization Specification', '2B' => 'Dependent Privilege Descriptors Still Exist', '2D' => 'Invalid Transaction Termination', '2F' => 'SQL Routine Exception', '34' => 'Invalid Cursor Name', '38' => 'External Routine Exception', '39' => 'External Routine Invocation Exception', '3B' => 'Savepoint Exception', '3D' => 'Invalid Catalog Name', '3F' => 'Invalid Schema Name', '40' => 'Transaction Rollback', '42' => 'Syntax Error or Access Rule Violation', '44' => 'WITH CHECK OPTION Violation', '53' => 'Insufficient Resources', '54' => 'Program Limit Exceeded', '55' => 'Object Not In Prerequisite State', '57' => 'Operator Intervention', '58' => 'System Error (errors external to PostgreSQL itself)', '72' => 'Snapshot Failure', 'F0' => 'Configuration File Error', 'HV' => 'Foreign Data Wrapper Error (SQL/MED)', 'P0' => 'PL/pgSQL Error', 'XX' => 'Internal Error', ); #### # method used to fork as many child as wanted ## sub spawn { my $coderef = shift; unless (@_ == 0 && $coderef && ref($coderef) eq 'CODE') { print "usage: spawn CODEREF"; exit 0; } my $pid; if (!defined($pid = fork)) { print STDERR "Error: cannot fork: $!\n"; return; } elsif ($pid) { $RUNNING_PIDS{$pid} = $pid; return; # the parent } # the child -- go spawn $< = $>; $( = $); # suid progs only exit &$coderef(); } # Command line options my $journalctl_cmd = ''; my $zcat_cmd = 'gunzip -c'; my $zcat = $zcat_cmd; my $bzcat = 'bunzip2 -c'; my $ucat = 'unzip -p'; my $xzcat = 'xzcat'; my $gzip_uncompress_size = "gunzip -l %f | grep -E '^\\s*[0-9]+' | awk '{print \$2}'"; my $zip_uncompress_size = "unzip -l %f | awk '{if (NR==4) print \$1}'"; my $xz_uncompress_size = "xz --robot -l %f | grep totals | awk '{print \$5}'"; my $format = ''; my $outfile = ''; my $outdir = ''; my $incremental = ''; my $extra_files = 0; my $help = ''; my $ver = ''; my @dbname = (); my @dbuser = (); my @dbclient = (); my @dbappname = (); my @exclude_user = (); my @exclude_appname = (); my @exclude_line = (); my $ident = ''; my $top = 0; my $sample = 3; my $extension = ''; my $maxlength = 0; my $graph = 1; my $nograph = 0; my $debug = 0; my $nohighlight = 0; my $noprettify = 0; my $from = ''; my $to = ''; my $quiet = 0; my $progress = 1; my $error_only = 0; my @exclude_query = (); my @exclude_time = (); my $exclude_file = ''; my @include_query = (); my $include_file = ''; my $disable_error = 0; my $disable_hourly = 0; my $disable_type = 0; my $disable_query = 0; my $disable_session = 0; my $disable_connection = 0; my $disable_lock = 0; my $disable_temporary = 0; my $disable_checkpoint = 0; my $disable_autovacuum = 0; my $avg_minutes = 5; my $histo_avg_minutes = 60; my $last_parsed = ''; my $report_title = ''; my $log_line_prefix = ''; my $compiled_prefix = ''; my $project_url = 'http://dalibo.github.com/pgbadger/'; my $t_min = 0; my $t_max = 0; my $remove_comment = 0; my $select_only = 0; my $tsung_queries = 0; my $queue_size = 0; my $job_per_file = 0; my $charset = 'utf-8'; my $csv_sep_char = 
','; my %current_sessions = (); my %pgb_current_sessions = (); my $incr_date = ''; my $last_incr_date = ''; my $anonymize = 0; my $noclean = 0; my $retention = 0; my $bar_graph = 0; my $dns_resolv = 0; my $nomultiline = 0; my $noreport = 0; my $log_duration = 0; my $logfile_list = ''; my $enable_checksum = 0; my $timezone = 0; my $pgbouncer_only = 0; my $rebuild = 0; my $week_start_monday = 0; my $use_sessionid_as_pid = 0; my $dump_normalized_only = 0; my $NUMPROGRESS = 10000; my @DIMENSIONS = (800, 300); my $RESRC_URL = ''; my $img_format = 'png'; my @log_files = (); my %prefix_vars = (); my $remote_host = ''; my $ssh_command = ''; my $ssh_bin = 'ssh'; my $ssh_identity = ''; my $ssh_user = ''; my $ssh_timeout = 10; my $ssh_options = "-o ConnectTimeout=$ssh_timeout -o PreferredAuthentications=hostbased,publickey"; # OBSOLETE, to be removed # List of regex that match fatal error message that do not # generate disconnection line in log. This is to prevent # sessions in the sessions charts to increase continually. # See issue #176 on github my @session_closed_msg = ( qr/^(database|role) "[^"]+" does not exist$/, qr/^no pg_hba.conf entry for/, ); my $sql_prettified; # Do not display data in pie where percentage is lower than this value # to avoid label overlapping. my $pie_percentage_limit = 2; # Get the decimal separator my $n = 5 / 2; my $num_sep = ','; $num_sep = ' ' if ($n =~ /,/); # Inform the parent that it should stop iterate on parsing other files sub stop_parsing { &logmsg('DEBUG', "Received interrupt signal"); $interrupt = 1; } # With multiprocess we need to wait for all children sub wait_child { my $sig = shift; $interrupt = 2; print STDERR "Received terminating signal ($sig).\n"; if ($^O !~ /MSWin32|dos/i) { 1 while wait != -1; $SIG{INT} = \&wait_child; $SIG{TERM} = \&wait_child; foreach my $f (@tempfiles) { unlink("$f->[1]") if (-e "$f->[1]"); } } if ($last_parsed && -e "$tmp_last_parsed") { unlink("$tmp_last_parsed"); } if ($last_parsed && -e "$last_parsed.tmp") { unlink("$last_parsed.tmp"); } if (-e "$PID_DIR/pgbadger.pid") { unlink("$PID_DIR/pgbadger.pid"); } _exit(0); } $SIG{INT} = \&wait_child; $SIG{TERM} = \&wait_child; $SIG{USR2} = \&stop_parsing; $| = 1; # get the command line parameters my $result = GetOptions( "a|average=i" => \$avg_minutes, "A|histo-average=i" => \$histo_avg_minutes, "b|begin=s" => \$from, "B|bar-graph!" => \$bar_graph, "c|dbclient=s" => \@dbclient, "C|nocomment!" => \$remove_comment, "d|dbname=s" => \@dbname, "D|dns-resolv!" => \$dns_resolv, "e|end=s" => \$to, "f|format=s" => \$format, "G|nograph!" => \$nograph, "h|help!" => \$help, "i|ident=s" => \$ident, "I|incremental!" => \$incremental, "j|jobs=i" => \$queue_size, "J|job_per_file=i" => \$job_per_file, "l|last-parsed=s" => \$last_parsed, "L|logfile-list=s" => \$logfile_list, "m|maxlength=i" => \$maxlength, "M|no-multiline!" => \$nomultiline, "N|appname=s" => \@dbappname, "n|nohighlight!" => \$nohighlight, "o|outfile=s" => \$outfile, "O|outdir=s" => \$outdir, "p|prefix=s" => \$log_line_prefix, "P|no-prettify!" => \$noprettify, "q|quiet!" => \$quiet, "r|remote-host=s" => \$remote_host, 'R|retention=i' => \$retention, "s|sample=i" => \$sample, "S|select-only!" => \$select_only, "t|top=i" => \$top, "T|title=s" => \$report_title, "u|dbuser=s" => \@dbuser, "U|exclude-user=s" => \@exclude_user, "v|verbose!" => \$debug, "V|version!" => \$ver, "w|watch-mode!" => \$error_only, "x|extension=s" => \$extension, "X|extra-files!" 
=> \$extra_files, "z|zcat=s" => \$zcat, "Z|timezone=s" => \$timezone, "pie-limit=i" => \$pie_percentage_limit, "image-format=s" => \$img_format, "exclude-query=s" => \@exclude_query, "exclude-file=s" => \$exclude_file, "exclude-appname=s" => \@exclude_appname, "include-query=s" => \@include_query, "exclude-line=s" => \@exclude_line, "include-file=s" => \$include_file, "disable-error!" => \$disable_error, "disable-hourly!" => \$disable_hourly, "disable-type!" => \$disable_type, "disable-query!" => \$disable_query, "disable-session!" => \$disable_session, "disable-connection!" => \$disable_connection, "disable-lock!" => \$disable_lock, "disable-temporary!" => \$disable_temporary, "disable-checkpoint!" => \$disable_checkpoint, "disable-autovacuum!" => \$disable_autovacuum, "charset=s" => \$charset, "csv-separator=s" => \$csv_sep_char, "exclude-time=s" => \@exclude_time, 'ssh-command=s' => \$ssh_command, 'ssh-program=s' => \$ssh_bin, 'ssh-identity=s' => \$ssh_identity, 'ssh-option=s' => \$ssh_options, 'ssh-user=s' => \$ssh_user, 'ssh-timeout=i' => \$ssh_timeout, 'anonymize!' => \$anonymize, 'noclean!' => \$noclean, 'noreport!' => \$noreport, 'log-duration!' => \$log_duration, 'enable-checksum!' => \$enable_checksum, 'journalctl=s' => \$journalctl_cmd, 'pid-dir=s' => \$PID_DIR, 'rebuild!' => \$rebuild, 'pgbouncer-only!' => \$pgbouncer_only, 'start-monday!' => \$week_start_monday, 'normalized-only!' => \$dump_normalized_only, ); die "FATAL: use pgbadger --help\n" if (not $result); $report_title = &escape_html($report_title) if $report_title; if ($ver) { print "pgBadger version $VERSION\n"; exit 0; } &usage() if ($help); # Try to load Digest::MD5 when asked if ($enable_checksum) { if (eval {require Digest::MD5;1} ne 1) { die("Can not load Perl module Digest::MD5.\n"); } else { Digest::MD5->import('md5_hex'); } } # Check if an other process is already running if (-e "$PID_DIR/pgbadger.pid") { my $is_running = 2; if ($^O !~ /MSWin32|dos/i) { eval { $is_running = `ps auwx | grep pgbadger | grep -v grep | wc -l`; chomp($is_running); }; } if (!$@ && ($is_running <= 1)) { unlink("$PID_DIR/pgbadger.pid"); } else { print "FATAL: an other process is already started or remove the file, see $PID_DIR/pgbadger.pid\n"; exit 3; } } # Create pid file unless(open(OUT, ">$PID_DIR/pgbadger.pid")) { print "FATAL: can't create pid file $PID_DIR/pgbadger.pid, $!\n"; exit 3; } print OUT $$; close(OUT); # Rewrite some command line arguments as lists &compute_arg_list(); # If pgBadger must parse remote files set the ssh command if ($remote_host) { # If no user defined ssh command if (!$ssh_command) { $ssh_command = $ssh_bin || 'ssh'; $ssh_command .= " -i $ssh_identity" if ($ssh_identity); $ssh_command .= " $ssh_options" if ($ssh_options); if ($ssh_user) { $ssh_command .= " $ssh_user\@$remote_host"; } else { $ssh_command .= " $remote_host"; } } } # Add journalctl command to the file list if not already found if ($journalctl_cmd) { if (!grep(/^\Q$journalctl_cmd\E$/, @ARGV)) { $journalctl_cmd .= " --output='short-iso'"; push(@ARGV, $journalctl_cmd); } } # Log files to be parsed are passed as command line arguments my $empty_files = 1; if ($#ARGV >= 0) { foreach my $file (@ARGV) { if ( $journalctl_cmd && ($file =~ m/\Q$journalctl_cmd\E/) ) { push(@log_files, $file); $empty_files = 0; } elsif ($file ne '-') { if (!$remote_host) { localdie("FATAL: logfile \"$file\" must exist!\n") if (not -f $file); if (-z $file) { print "WARNING: file $file is empty\n" if (!$quiet); next; } push(@log_files, $file); $empty_files = 0; } else { 
# Get files from remote host &logmsg('DEBUG', "Looking for remote filename using command: $ssh_command \"ls $file\""); my @rfiles = `$ssh_command "ls $file"`; foreach my $f (@rfiles) { push(@log_files, $f); } $empty_files = 0; } } else { if ($logfile_list) { localdie("FATAL: stdin input - can not be used with logfile list (-L).\n"); } push(@log_files, $file); $empty_files = 0; } } } # Read list of log file to parse from a file if ($logfile_list) { if (!-e $logfile_list) { localdie("FATAL: logfile list $logfile_list must exist!\n"); } if (not open(IN, $logfile_list)) { localdie("FATAL: can not read logfile list $logfile_list, $!.\n"); } my @files = ; close(IN); foreach my $file (@files) { chomp($file); $file =~ s/\r//; if ($file eq '-') { localdie("FATAL: stdin input - can not be used with logfile list.\n"); } if ( $journalctl_cmd && ($file =~ m/\Q$journalctl_cmd\E/) ) { push(@log_files, $file); $empty_files = 0; } elsif (!$remote_host) { localdie("FATAL: logfile $file must exist!\n") if (not -f $file); if (-z $file) { print "WARNING: file $file is empty\n" if (!$quiet); next; } $empty_files = 0; push(@log_files, $file); } else { # Get files from remote host &logmsg('DEBUG', "Looking for remote filename using command: $ssh_command \"ls $file\""); my @rfiles = `$ssh_command "ls $file"`; foreach my $f (@rfiles) { push(@log_files, $f); } $empty_files = 0; } } } # Do not warn if all log files are empty if (!$rebuild && $empty_files) { &logmsg('DEBUG', "All log files are empty, exiting..."); unlink("$PID_DIR/pgbadger.pid"); exit 0; } # Logfile is a mandatory parameter when journalctl command is not set. if ( !$rebuild && ($#log_files < 0) && !$journalctl_cmd) { if (!$quiet) { localdie("FATAL: you must give a log file at command line parameter.\n\n"); } else { unlink("$PID_DIR/pgbadger.pid"); exit 4; } } # Remove follow option from journalctl command to prevent infinit loop if ($journalctl_cmd) { $journalctl_cmd =~ s/(-f|--follow)\b//; } # Quiet mode is forced with progress bar $progress = 0 if ($quiet); # Set the default number minutes for queries and connections average $avg_minutes ||= 5; $avg_minutes = 60 if ($avg_minutes > 60); $avg_minutes = 1 if ($avg_minutes < 1); $histo_avg_minutes ||= 60; $histo_avg_minutes = 60 if ($histo_avg_minutes > 60); $histo_avg_minutes = 1 if ($histo_avg_minutes < 1); my @avgs = (); for (my $i = 0 ; $i < 60 ; $i += $avg_minutes) { push(@avgs, sprintf("%02d", $i)); } my @histo_avgs = (); for (my $i = 0 ; $i < 60 ; $i += $histo_avg_minutes) { push(@histo_avgs, sprintf("%02d", $i)); } # Set error like log level regex my $parse_regex = qr/^(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|HINT|STATEMENT|CONTEXT|LOCATION)/; my $full_error_regex = qr/^(WARNING|ERROR|FATAL|PANIC|DETAIL|HINT|STATEMENT|CONTEXT)/; my $main_error_regex = qr/^(WARNING|ERROR|FATAL|PANIC)/; my $main_log_regex = qr/^(LOG|WARNING|ERROR|FATAL|PANIC)/; # Set syslog prefix regex my $other_syslog_line = ''; my $pgbouncer_log_format = ''; my $pgbouncer_log_parse1 = ''; my $pgbouncer_log_parse2 = ''; # Variable to store parsed data following the line prefix my @prefix_params = (); my @pgb_prefix_params = (); my @pgb_prefix_parse1 = (); my @pgb_prefix_parse2 = (); # Set default format, with multiple files format will be autodetected each time.i # This default format will be used when the autodetection fail. 
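# Illustrative summary of the detection logic below: journalctl input is always
# treated as 'syslog2'; for stdin or remote input the autodetection is only
# attempted when no --format option was given, and a user-supplied --format
# always takes precedence over the autodetected value ($format ||= $frmt).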
my $frmt = ''; if (!$rebuild) { if (!$remote_host && ($ARGV[0] ne '-')) { if ($journalctl_cmd) { $frmt = 'syslog2'; } else { $frmt = &autodetect_format($log_files[0]); } } elsif (!$format) { if ($journalctl_cmd) { $frmt = 'syslog2'; } else { $frmt = &autodetect_format($log_files[0]); } } } else { if (!$incremental) { print STDERR "WARNING: --rebuild require incremental mode, activating it.\n" } $incremental = 1; } $format ||= $frmt; # Set default top query $top ||= 20; # Set timezone $timezone = ((0-$timezone)*3600); # Set the default extension and output format if (!$extension) { if ($outfile =~ /\.bin/i) { $extension = 'binary'; } elsif ($outfile =~ /\.json/i) { if (eval {require JSON::XS;1;} ne 1) { localdie("Can not save output in json format, please install Perl module JSON::XS first.\n"); } else { JSON::XS->import(); } $extension = 'json'; } elsif ($outfile =~ /\.tsung/i) { $extension = 'tsung'; } elsif ($outfile =~ /\.htm[l]*/i) { $extension = 'html'; } elsif ($outfile) { $extension = 'txt'; } else { $extension = 'html'; } } elsif (lc($extension) eq 'json') { if (eval {require JSON::XS;1;} ne 1) { localdie("Can not save output in json format, please install Perl module JSON::XS first.\n"); } else { JSON::XS->import(); } } # Force text output with normalized query list only # and disable incremental report if ($dump_normalized_only) { $extension = 'txt'; $incremental = 0; $report_title = 'Normalized query report' if (!$report_title); } # Set default filename of the output file $outfile ||= 'out.' . $extension; &logmsg('DEBUG', "Output '$extension' reports will be written to $outfile"); # Set default syslog ident name $ident ||= 'postgres'; # Set default pie percentage limit or fix value $pie_percentage_limit = 0 if ($pie_percentage_limit < 0); $pie_percentage_limit = 2 if ($pie_percentage_limit eq ''); $pie_percentage_limit = 100 if ($pie_percentage_limit > 100); # Set default download image format $img_format = lc($img_format); $img_format = 'jpeg' if ($img_format eq 'jpg'); $img_format = 'png' if ($img_format ne 'jpeg'); # Extract the output directory from outfile so that graphs will # be created in the same directory if ($outfile ne '-') { if (!$outdir) { my @infs = fileparse($outfile); if ($infs[0] ne '') { $outdir = $infs[1]; } else { # maybe a confusion between -O and -o localdie("FATAL: output file $outfile is a directory, should be a file\nor maybe you want to use -O | --outdir option instead.\n"); } } elsif (!-d "$outdir") { # An output directory has been passed as command line parameter localdie("FATAL: $outdir is not a directory or doesn't exist.\n"); } $outfile = basename($outfile); $outfile = $outdir . '/' . 
$outfile; } # Remove graph support if output is not html $graph = 0 unless ($extension eq 'html' or $extension eq 'binary' or $extension eq 'json'); $graph = 0 if ($nograph); # Set some default values my $end_top = $top - 1; $queue_size ||= 1; $job_per_file ||= 1; if ($^O =~ /MSWin32|dos/i) { if ( ($queue_size > 1) || ($job_per_file > 1) ) { print STDERR "WARNING: parallel processing is not supported on this platform.\n"; $queue_size = 1; $job_per_file = 1; } } if ($extension eq 'tsung') { # Open filehandle my $fh = new IO::File ">$outfile"; if (not defined $fh) { localdie("FATAL: can't write to $outfile, $!\n"); } print $fh qq{ }; $fh->close(); } else { # Test file creation before going to parse log my $tmpfh = new IO::File ">$outfile"; if (not defined $tmpfh) { localdie("FATAL: can't write to $outfile, $!\n"); } $tmpfh->close(); unlink($outfile) if (-e $outfile); } # -w and --disable-error can't go together if ($error_only && $disable_error) { localdie("FATAL: please choose between no event report and reporting events only.\n"); } # Set default search pattern for database, user name, application name and host in log_line_prefix my $regex_prefix_dbname = qr/(?:db|database)=([^,]*)/; my $regex_prefix_dbuser = qr/(?:user|usr)=([^,]*)/; my $regex_prefix_dbclient = qr/(?:client|remote|ip|host)=([^,\(]*)/; my $regex_prefix_dbappname = qr/(?:app|application)=([^,]*)/; # Set pattern to look for query type my $action_regex = qr/^[\s\(]*(DELETE|INSERT|UPDATE|SELECT|COPY|WITH|CREATE|DROP|ALTER|TRUNCATE|BEGIN|COMMIT|ROLLBACK|START|END|SAVEPOINT)/is; # Loading excluded query from file if any if ($exclude_file) { open(IN, "$exclude_file") or localdie("FATAL: can't read file $exclude_file: $!\n"); my @exclq = ; close(IN); chomp(@exclq); map {s/\r//;} @exclq; foreach my $r (@exclq) { &check_regex($r, '--exclude-file'); } push(@exclude_query, @exclq); } # Testing regex syntax if ($#exclude_query >= 0) { foreach my $r (@exclude_query) { &check_regex($r, '--exclude-query'); } } # Testing regex syntax if ($#exclude_time >= 0) { foreach my $r (@exclude_time) { &check_regex($r, '--exclude-time'); } } # Loading included query from file if any if ($include_file) { open(IN, "$include_file") or localdie("FATAL: can't read file $include_file: $!\n"); my @exclq = ; close(IN); chomp(@exclq); map {s/\r//;} @exclq; foreach my $r (@exclq) { &check_regex($r, '--include-file'); } push(@include_query, @exclq); } # Testing regex syntax if ($#include_query >= 0) { foreach my $r (@include_query) { &check_regex($r, '--include-query'); } } # Check start/end date time if ($from) { if ($from !~ /^(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})([.]\d+([+-]\d+)?)?$/) { localdie("FATAL: bad format for begin datetime, should be yyyy-mm-dd hh:mm:ss.l+tz\n"); } else { my $fractional_seconds = $7 || "0"; $from = "$1-$2-$3 $4:$5:$6.$7" } } if ($to) { if ($to !~ /^(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})([.]\d+([+-]\d+)?)?$/) { localdie("FATAL: bad format for ending datetime, should be yyyy-mm-dd hh:mm:ss.l+tz\n"); } else { my $fractional_seconds = $7 || "0"; $to = "$1-$2-$3 $4:$5:$6.$7" } } if ($from && $to && ($from gt $to)) { localdie("FATAL: begin date is after end time!\n") ; } # Stores the last parsed line from log file to allow incremental parsing my $LAST_LINE = ''; # Set the level of the data aggregator, can be minute, hour or day follow the # size of the log file. 
my $LEVEL = 'hour'; # Month names my %month_abbr = ( 'Jan' => '01', 'Feb' => '02', 'Mar' => '03', 'Apr' => '04', 'May' => '05', 'Jun' => '06', 'Jul' => '07', 'Aug' => '08', 'Sep' => '09', 'Oct' => '10', 'Nov' => '11', 'Dec' => '12' ); my %abbr_month = ( '01' => 'Jan', '02' => 'Feb', '03' => 'Mar', '04' => 'Apr', '05' => 'May', '06' => 'Jun', '07' => 'Jul', '08' => 'Aug', '09' => 'Sep', '10' => 'Oct', '11' => 'Nov', '12' => 'Dec' ); # Keywords variable my @pg_keywords = qw( ALL ANALYSE ANALYZE AND ANY ARRAY AS ASC ASYMMETRIC AUTHORIZATION BERNOULLI BINARY BOTH CASE CAST CHECK COLLATE COLLATION COLUMN CONCURRENTLY CONSTRAINT CREATE CROSS CUBE CURRENT_DATE CURRENT_ROLE CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER DEFAULT DEFERRABLE DESC DISTINCT DO ELSE END EXCEPT FALSE FETCH FOR FOREIGN FREEZE FROM FULL GRANT GROUP GROUPING HAVING ILIKE IN INITIALLY INNER INTERSECT INTO IS ISNULL JOIN LEADING LEFT LIKE LIMIT LOCALTIME LOCALTIMESTAMP LOCKED LOGGED NATURAL NOT NOTNULL NULL ON ONLY OPEN OR ORDER OUTER OVER OVERLAPS PLACING POLICY PRIMARY REFERENCES RETURNING RIGHT ROLLUP SELECT SESSION_USER SETS SKIP SIMILAR SOME SYMMETRIC TABLE TABLESAMPLE THEN TO TRAILING TRUE UNION UNIQUE USER USING VARIADIC VERBOSE WHEN WHERE WINDOW WITH ); my @redshift_keywords = qw( AES128 AES256 ALLOWOVERWRITE BACKUP BLANKSASNULL BYTEDICT BZIP2 CREDENTIALS CURRENT_USER_ID DEFLATE DEFRAG DELTA DELTA32K DISABLE DISTKEY EMPTYASNULL ENABLE ENCODE ENCRYPT ENCRYPTION EXPLICIT GLOBALDICT256 GLOBALDICT64K GZIP INTERLEAVED LUN LUNS LZO LZOP MINUS MOSTLY13 MOSTLY32 MOSTLY8 NEW OFFLINE OFFSET OID OLD PARALLEL PERCENT PERMISSIONS RAW READRATIO RECOVER RESPECT REJECTLOG RESORT RESTORE SORTKEY SYSDATE TAG TDES TEXT255 TEXT32K TIMESTAMP TOP TRUNCATECOLUMNS WALLET ); my @beautify_pg_keywords = qw( ANALYSE ANALYZE CONCURRENTLY FREEZE ILIKE ISNULL LIKE NOTNULL PLACING RETURNING VARIADIC ); # Highlight variables my @KEYWORDS1 = qw( ALTER ADD AUTO_INCREMENT BETWEEN BY BOOLEAN BEGIN CHANGE COLUMNS COMMIT COALESCE CLUSTER COPY DATABASES DATABASE DATA DELAYED DESCRIBE DELETE DROP ENCLOSED ESCAPED EXISTS EXPLAIN FIELDS FIELD FLUSH FUNCTION GREATEST IGNORE INDEX INFILE INSERT IDENTIFIED IF INHERIT KEYS KILL KEY LINES LOAD LOCAL LOCK LOW_PRIORITY LANGUAGE LEAST LOGIN MODIFY NULLIF NOSUPERUSER NOCREATEDB NOCREATEROLE OPTIMIZE OPTION OPTIONALLY OUTFILE OWNER PROCEDURE PROCEDURAL READ REGEXP RENAME RETURN REVOKE RLIKE ROLE ROLLBACK SHOW SONAME STATUS STRAIGHT_JOIN SET SEQUENCE TABLES TEMINATED TRUNCATE TEMPORARY TRIGGER TRUSTED UN$filenumLOCK USE UPDATE UNSIGNED VALUES VARIABLES VIEW VACUUM WRITE ZEROFILL XOR ABORT ABSOLUTE ACCESS ACTION ADMIN AFTER AGGREGATE ALSO ALWAYS ASSERTION ASSIGNMENT AT ATTRIBUTE BACKWARD BEFORE BIGINT CACHE CALLED CASCADE CASCADED CATALOG CHAIN CHARACTER CHARACTERISTICS CHECKPOINT CLOSE COMMENT COMMENTS COMMITTED CONFIGURATION CONNECTION CONSTRAINTS CONTENT CONTINUE CONVERSION COST CSV CURRENT CURSOR CYCLE DAY DEALLOCATE DEC DECIMAL DECLARE DEFAULTS DEFERRED DEFINER DELIMITER DELIMITERS DICTIONARY DISABLE DISCARD DOCUMENT DOMAIN DOUBLE EACH ENABLE ENCODING ENCRYPTED ENUM ESCAPE EXCLUDE EXCLUDING EXCLUSIVE EXECUTE EXTENSION EXTERNAL FIRST FLOAT FOLLOWING FORCE FORWARD FUNCTIONS GLOBAL GRANTED HANDLER HEADER HOLD HOUR IDENTITY IMMEDIATE IMMUTABLE IMPLICIT INCLUDING INCREMENT INDEXES INHERITS INLINE INOUT INPUT INSENSITIVE INSTEAD INT INTEGER INVOKER ISOLATION LABEL LARGE LAST LC_COLLATE LC_CTYPE LEAKPROOF LEVEL LISTEN LOCATION LOOP MAPPING MATCH MAXVALUE MINUTE MINVALUE MODE MONTH MOVE NAMES NATIONAL NCHAR NEXT NO NONE NOTHING 
NOTIFY NOWAIT NULLS OBJECT OF OFF OIDS OPERATOR OPTIONS OUT OWNED PARSER PARTIAL PARTITION PASSING PASSWORD PLANS PRECEDING PRECISION PREPARE PREPARED PRESERVE PRIOR PRIVILEGES QUOTE RANGE REAL REASSIGN RECHECK RECURSIVE REF REINDEX RELATIVE RELEASE REPEATABLE REPLICA RESET RESTART RESTRICT RETURNS ROW ROWS RULE SAVEPOINT SCHEMA SCROLL SEARCH SECOND SECURITY SEQUENCES SERIALIZABLE SERVER SESSION SETOF SHARE SIMPLE SMALLINT SNAPSHOT STABLE STANDALONE START STATEMENT STATISTICS STORAGE STRICT SYSID SYSTEM TABLESPACE TEMP TEMPLATE TRANSACTION TREAT TYPE TYPES UNBOUNDED UNCOMMITTED UNENCRYPTED UNKNOWN UNLISTEN UNLOGGED UNTIL VALID VALIDATE VALIDATOR VALUE VARYING VOLATILE WHITESPACE WITHOUT WORK WRAPPER XMLATTRIBUTES XMLCONCAT XMLELEMENT XMLEXISTS XMLFOREST XMLPARSE XMLPI XMLROOT XMLSERIALIZE YEAR YES ZONE ); foreach my $k (@pg_keywords) { push(@KEYWORDS1, $k) if (!grep(/^$k$/i, @KEYWORDS1)); } foreach my $k (@redshift_keywords) { push(@KEYWORDS1, $k) if (!grep(/^$k$/i, @KEYWORDS1)); } my @KEYWORDS2 = ( 'ascii', 'age', 'bit_length', 'btrim', 'char_length', 'character_length', 'convert', 'chr', 'current_date', 'current_time', 'current_timestamp', 'count', 'decode', 'date_part', 'date_trunc', 'encode', 'extract', 'get_byte', 'get_bit', 'initcap', 'isfinite', 'interval', 'justify_hours', 'justify_days', 'lower', 'length', 'lpad', 'ltrim', 'localtime', 'localtimestamp', 'md5', 'now', 'octet_length', 'overlay', 'position', 'pg_client_encoding', 'quote_ident', 'quote_literal', 'repeat', 'replace', 'rpad', 'rtrim', 'substring', 'split_part', 'strpos', 'substr', 'set_byte', 'set_bit', 'trim', 'to_ascii', 'to_hex', 'translate', 'to_char', 'to_date', 'to_timestamp', 'to_number', 'timeofday', 'upper', ); my @KEYWORDS3 = ('STDIN', 'STDOUT'); my %SYMBOLS = ( '=' => '=', '<' => '<', '>' => '>', '\|' => '|', ',' => ',', '\.' => '.', '\+' => '+', '\-' => '-', '\*' => '*', '\/' => '/', '!=' => '!=' ); my @BRACKETS = ('(', ')'); map {$_ = quotemeta($_)} @BRACKETS; # Inbounds of query times histogram my @histogram_query_time = (0, 1, 5, 10, 25, 50, 100, 500, 1000, 10000); # Inbounds of session times histogram my @histogram_session_time = (0, 500, 1000, 30000, 60000, 600000, 1800000, 3600000, 28800000); # Where statistics are stored my %overall_stat = (); my %pgb_overall_stat = (); my %overall_checkpoint = (); my @top_slowest = (); my %normalyzed_info = (); my %error_info = (); my %pgb_error_info = (); my %pgb_pool_info = (); my %logs_type = (); my %errors_code = (); my %per_minute_info = (); my %pgb_per_minute_info = (); my %lock_info = (); my %tempfile_info = (); my %cancelled_info = (); my %connection_info = (); my %pgb_connection_info = (); my %database_info = (); my %application_info = (); my %user_info = (); my %host_info = (); my %session_info = (); my %pgb_session_info = (); my %conn_received = (); my %checkpoint_info = (); my %autovacuum_info = (); my %autoanalyze_info = (); my @graph_values = (); my %cur_info = (); my %cur_temp_info = (); my %cur_plan_info = (); my %cur_cancel_info = (); my %cur_lock_info = (); my $nlines = 0; my %last_line = (); my %pgb_last_line = (); our %saved_last_line = (); our %pgb_saved_last_line= (); my %tsung_session = (); my @top_locked_info = (); my @top_tempfile_info = (); my @top_cancelled_info = (); my %drawn_graphs = (); # Global output filehandle my $fh = undef; my $t0 = Benchmark->new; # Write resources files from __DATA__ section if they have not been already copied # and return the HTML links to that files. 
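# Illustrative sketch (not part of the original source): how the query time
# inbounds in @histogram_query_time above are turned into histogram buckets,
# mirroring the get_hist_inbound() helper defined further down. A duration
# falls into the bucket whose lower bound is returned; values beyond the last
# inbound land in the open-ended bucket (-1).
sub example_hist_bucket
{
	my ($duration, @inbounds) = @_;
	for (my $i = 1; $i <= $#inbounds; $i++) {
		return $inbounds[$i - 1] if ($inbounds[$i] > $duration);
	}
	return -1;
}
# e.g. example_hist_bucket(7, 0, 1, 5, 10, 25, 50, 100, 500, 1000, 10000)
# returns 5 (the 5-10ms bucket) and example_hist_bucket(20000, ...) returns -1.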
If --extra-file is not used returns the # CSS and JS code to be embeded in HTML files my @jscode = &write_resources(); # Automatically set parameters with incremental mode if ($incremental) { # In incremental mode an output directory must be set if (!$outdir) { localdie("FATAL: you must specify an output directory with incremental mode, see -O or --outdir.\n") } # Ensure this is not a relative path if (dirname($outdir) eq '.') { localdie("FATAL: output directory ($outdir) is not an absolute path.\n"); } # Ensure that the directory already exists if (!-d $outdir) { localdie("FATAL: output directory $outdir does not exists\n"); } # Set default last parsed file in incremental mode if (!$last_parsed) { $last_parsed = $outdir . '/LAST_PARSED'; } $outfile = 'index.html'; # Set default output format $extension = 'binary'; if ($rebuild) { # Look for directory where report must be generated again my @build_directories = (); # Find directories that shoud be rebuilt unless(opendir(DIR, "$outdir")) { localdie("Error: can't opendir $outdir: $!"); } my @dyears = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; foreach my $y (sort { $a <=> $b } @dyears) { unless(opendir(DIR, "$outdir/$y")) { localdie("Error: can't opendir $outdir/$y: $!"); } my @dmonths = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; foreach my $m (sort { $a <=> $b } @dmonths) { unless(opendir(DIR, "$outdir/$y/$m")) { localdie("Error: can't opendir $outdir/$y/$m: $!"); } my @ddays = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; foreach my $d (sort { $a <=> $b } @ddays) { unless(opendir(DIR, "$outdir/$y/$m/$d")) { localdie("Error: can't opendir $outdir/$y/$m/$d: $!"); } my @binfiles = grep { $_ =~ /\.bin$/ } readdir(DIR); closedir DIR; push(@build_directories, "$y-$m-$d") if ($#binfiles >= 0); } } } &build_incremental_reports(@build_directories); my $t2 = Benchmark->new; my $td = timediff($t2, $t0); &logmsg('DEBUG', "rebuilding reports took: " . timestr($td)); # Remove pidfile unlink("$PID_DIR/pgbadger.pid"); exit 0; } } else { # Extra files for resources are not allowed without incremental mode $extra_files = 0; } # Reading last line parsed if ($last_parsed && -e $last_parsed) { if (open(IN, "$last_parsed")) { my @content = ; close(IN); foreach my $line (@content) { chomp($line); next if (!$line); my ($datetime, $current_pos, $orig, @others) = split(/\t/, $line); # Last parsed line with pgbouncer log starts with this keyword if ($datetime eq 'pgbouncer') { $pgb_saved_last_line{datetime} = $current_pos; $pgb_saved_last_line{current_pos} = $orig; $pgb_saved_last_line{orig} = join("\t", @others); } else { $saved_last_line{datetime} = $datetime; $saved_last_line{current_pos} = $current_pos; $saved_last_line{orig} = $orig; } } # Those two log format must be read from start of the file if ( ($format eq 'binary') || ($format eq 'csv') ) { $saved_last_line{current_pos} = 0; $pgb_saved_last_line{current_pos} = 0 if ($format eq 'binary'); } } else { localdie("FATAL: can't read last parsed line from $last_parsed, $!\n"); } } $tmp_last_parsed = 'tmp_' . 
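# Illustrative sketch (not part of the original source): layout of the last
# parsed file read above. It is tab separated, with one entry for the
# PostgreSQL log and an optional entry prefixed with the literal word
# "pgbouncer" whose fields are shifted by one, e.g.:
#   2017-07-27 10:15:42<TAB>123456789<TAB>last log line seen...
#   pgbouncer<TAB>2017-07-27 10:15:40<TAB>4567<TAB>last pgbouncer line seen...
# A self-contained reader for that layout (sub and variable names are
# illustrative only):
sub example_read_last_parsed
{
	my $file = shift;
	my %pg   = ();
	my %pgb  = ();
	open(my $in, '<', $file) or return ();
	while (my $line = <$in>) {
		chomp($line);
		next if (!$line);
		my ($datetime, $pos, $orig, @others) = split(/\t/, $line);
		if ($datetime eq 'pgbouncer') {
			# Fields are shifted by one for pgbouncer entries
			($pgb{datetime}, $pgb{current_pos}, $pgb{orig}) = ($pos, $orig, join("\t", @others));
		} else {
			($pg{datetime}, $pg{current_pos}, $pg{orig}) = ($datetime, $pos, $orig);
		}
	}
	close($in);
	return (\%pg, \%pgb);
}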
basename($last_parsed) if ($last_parsed); $tmp_last_parsed = "$TMP_DIR/$tmp_last_parsed"; # Clean the incremental directory if the feature is not disabled if (!$noclean && $outdir && ($saved_last_line{datetime} || $pgb_saved_last_line{datetime})) { my $last_year = ''; my $last_month = ''; my $last_day = ''; # Search the current week following the last parse date if ( ($saved_last_line{datetime} =~ /^(\d+)\-(\d+)\-(\d+) /) || ($pgb_saved_last_line{datetime} =~ /^(\d+)\-(\d+)\-(\d+) /) ) { $last_year = $1; $last_month = $2; $last_day = $3; } # Get the week number following the date my $wn = &get_week_number($last_year, $last_month, $last_day); # Get the days of the current week where binary files must be preserved my @wdays = &get_wdays_per_month($wn - 1, "$last_year-$last_month"); # Find obsolete dir days that shoud be cleaned unless(opendir(DIR, "$outdir")) { localdie("Error: can't opendir $outdir: $!"); } my @dyears = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; my @obsolete_days = (); foreach my $y (sort { $a <=> $b } @dyears) { unless(opendir(DIR, "$outdir/$y")) { localdie("Error: can't opendir $outdir/$y: $!"); } my @dmonths = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; foreach my $m (sort { $a <=> $b } @dmonths) { unless(opendir(DIR, "$outdir/$y/$m")) { localdie("Error: can't opendir $outdir/$y/$m: $!"); } my @ddays = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; foreach my $d (sort { $a <=> $b } @ddays) { if ("$y-$m-$d" lt $wdays[0]) { push(@obsolete_days, "$outdir/$y/$m/$d"); } } } } foreach my $p (@obsolete_days) { unless(opendir(DIR, "$p")) { localdie("Error: can't opendir $p: $!"); } my @hfiles = grep { $_ =~ /\.(html|txt|tsung|json)$/i } readdir(DIR); next if ($#hfiles == -1); # do not remove files if report file has not been generated seekdir(DIR, 0); my @bfiles = grep { $_ =~ /\.bin$/i } readdir(DIR); closedir DIR; foreach my $f (@bfiles) { &logmsg('DEBUG', "Removing obsolete binary file: $p/$f"); unlink("$p/$f"); } } } # Clear storage when a retention is specified in incremental mode if ( $outdir && $retention && ($saved_last_line{datetime} || $pgb_saved_last_line{datetime}) ) { if (($saved_last_line{datetime} =~ /^(\d+)\-(\d+)\-(\d+) /) || ($pgb_saved_last_line{datetime} =~ /^(\d+)\-(\d+)\-(\d+) /)) { # Search the current week following the last parse date my $limit = $1; my $wn = &get_week_number($1, $2, $3); if (($wn - $retention) < 1) { $limit--; $limit .= "52"; } else { $limit .= sprintf("%02d", $wn - $retention); } # Find obsolete weeks dir that shoud be cleaned unless(opendir(DIR, "$outdir")) { localdie("Error: can't opendir $outdir: $!"); } my @dyears = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; my @obsolete_weeks = (); foreach my $y (sort { $a <=> $b } @dyears) { unless(opendir(DIR, "$outdir/$y")) { localdie("Error: can't opendir $outdir/$y: $!"); } my @weeks = grep { $_ =~ /^week-\d+$/ } readdir(DIR); closedir DIR; foreach my $w (sort { $a <=> $b } @weeks) { $w =~ /^week-(\d+)$/; if ("$y$1" lt $limit) { &logmsg('DEBUG', "Removing obsolete week directory $outdir/$y/week-$1"); &cleanup_directory("$outdir/$y/week-$1", 1); push(@obsolete_weeks, "$y$1"); } } } # Now removed the corresponding days foreach my $y (sort { $a <=> $b } @dyears) { unless(opendir(DIR, "$outdir/$y")) { localdie("Error: can't opendir $outdir/$y: $!"); } my @dmonths = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; my @rmmonths = (); foreach my $m (sort { $a <=> $b } @dmonths) { unless(opendir(DIR, "$outdir/$y/$m")) { localdie("Error: can't opendir $outdir/$y/$m: $!"); 
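# Worked example (illustrative, not part of the original source): how the
# retention cut-off computed above behaves, given the week number returned by
# get_week_number(). The values below are hypothetical.
sub example_retention_limit
{
	my ($year, $week_number, $retention) = @_;
	my $limit = $year;
	if (($week_number - $retention) < 1) {
		# Retention window crosses a year boundary
		$limit--;
		$limit .= "52";
	} else {
		$limit .= sprintf("%02d", $week_number - $retention);
	}
	return $limit;
}
# example_retention_limit(2017, 30, 2) returns "201728": every week directory
# whose "YYYYWW" key sorts below that value is removed.
# example_retention_limit(2017, 1, 2) returns "201652".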
} my @rmdays = (); my @ddays = grep { $_ =~ /^\d+$/ } readdir(DIR); closedir DIR; foreach my $d (sort { $a <=> $b } @ddays) { my $weekNumber = ''; if (!$week_start_monday) { $weekNumber = sprintf("%02d", POSIX::strftime("%U", 1, 1, 1, $d, $m - 1, $y - 1900)+1); } else { $weekNumber = sprintf("%02d", POSIX::strftime("%W", 1, 1, 1, $d, $m - 1, $y - 1900)+1); } if ($#obsolete_weeks >= 0) { if (grep(/^$y$weekNumber$/, @obsolete_weeks)) { &logmsg('DEBUG', "Removing obsolete directory $outdir/$y/$m/$d"); &cleanup_directory("$outdir/$y/$m/$d", 1); push(@rmdays, $d); } } else { # Remove obsolete days when we are in binary mode # with noreport - there's no week-N directory my $diff_day = $retention * 7 * 86400; my $oldday = POSIX::strftime("%s", 1,1,1,$d,$m-1,$y-1900); my $lastday = $oldday; if (($saved_last_line{datetime} =~ /^(\d+)\-(\d+)\-(\d+) /) || ($pgb_saved_last_line{datetime} =~ /^(\d+)\-(\d+)\-(\d+) /)) { $lastday = POSIX::strftime("%s", 1,1,1,$3,$2-1,$1-1900); } if (($lastday - $oldday) > $diff_day) { &logmsg('DEBUG', "Removing obsolete directory $outdir/$y/$m/$d"); &cleanup_directory("$outdir/$y/$m/$d", 1); push(@rmdays, $d); } } } if ($#ddays == $#rmdays) { &logmsg('DEBUG', "Removing obsolete empty directory $outdir/$y/$m"); rmdir("$outdir/$y/$m"); push(@rmmonths, $m); } } if ($#dmonths == $#rmmonths) { &logmsg('DEBUG', "Removing obsolete empty directory $outdir/$y"); rmdir("$outdir/$y"); } } } } # Main loop reading log files my $global_totalsize = 0; my @given_log_files = ( @log_files ); chomp(@given_log_files); # Append journalctl command at top of log file list unshift(@given_log_files, $journalctl_cmd) if ($journalctl_cmd); # Verify that the file has not changed for incremental move if (!$remote_host) { my @tmpfilelist = (); # Removed files that have already been parsed during previous runs foreach my $f (@given_log_files) { if ($f eq '-') { &logmsg('DEBUG', "waiting for log entries from stdin."); $saved_last_line{current_pos} = 0; push(@tmpfilelist, $f); } elsif ( $journalctl_cmd && ($f eq $journalctl_cmd) ) { my $since = ''; if ( ($journalctl_cmd !~ /--since|-S/) && ($saved_last_line{datetime} =~ /^(\d+)-(\d+)-(\d+).(\d+):(\d+):(\d+)/) ) { $since = " --since='$1-$2-$3 $4:$5:$6'"; } &logmsg('DEBUG', "journalctl call will start since: $saved_last_line{datetime}"); push(@tmpfilelist, "$f $since"); } else { # Auto detect log format for proper parsing my $fmt = autodetect_format($f); # Set regex to parse the log file $fmt = set_parser_regex($fmt); if (($fmt ne 'pgbouncer') && ($saved_last_line{current_pos} > 0)) { my ($retcode, $msg) = &check_file_changed($f, $fmt, $saved_last_line{datetime}, $saved_last_line{current_pos}); if (!$retcode) { &logmsg('DEBUG', "this file has already been parsed: $f, $msg"); } else { push(@tmpfilelist, $f); } } elsif (($fmt eq 'pgbouncer') && ($pgb_saved_last_line{current_pos} > 0)) { my ($retcode, $msg) = &check_file_changed($f, $fmt, $pgb_saved_last_line{datetime}, $pgb_saved_last_line{current_pos}); if (!$retcode) { &logmsg('DEBUG', "this file has already been parsed: $f, $msg"); } else { push(@tmpfilelist, $f); } } else { push(@tmpfilelist, $f); } } } @given_log_files = (); push(@given_log_files, @tmpfilelist); } else { # Disable multi process when using ssh to parse remote log if ($queue_size > 1) { &logmsg('DEBUG', "parallel processing through ssh is not supported with remote file."); } $queue_size = 1; } # Disable parallel processing in binary mode if ($format eq 'binary') { if (($queue_size > 1) || ($job_per_file > 1)) { &logmsg('DEBUG', "parallel 
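# Illustrative sketch (not part of the original source): when --journalctl is
# used and a last parsed timestamp is known, the code above appends a --since
# clause so only new journal entries are returned. The same transformation in
# isolation (sub and argument names are illustrative):
sub example_journalctl_since
{
	my ($journalctl_cmd, $last_datetime) = @_;
	my $since = '';
	if (($journalctl_cmd !~ /--since|-S/)
		&& ($last_datetime =~ /^(\d+)-(\d+)-(\d+).(\d+):(\d+):(\d+)/))
	{
		$since = " --since='$1-$2-$3 $4:$5:$6'";
	}
	return "$journalctl_cmd$since";
}
# example_journalctl_since("journalctl -u postgresql-9.5", "2017-07-27 10:15:42")
# returns "journalctl -u postgresql-9.5 --since='2017-07-27 10:15:42'"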
processing is not supported with binary format.") if (!$quiet); } $queue_size = 1; $job_per_file = 1; } # Pipe used for progress bar in multiprocess my $pipe; # Seeking to an old log position is not possible outside incremental mode if (!$last_parsed) { $saved_last_line{current_pos} = 0; $pgb_saved_last_line{current_pos} = 0; } # Start parsing all given files using multiprocess if ( ($#given_log_files >= 0) && (($queue_size > 1) || ($job_per_file > 1)) ) { # Number of running process my $child_count = 0; # Set max number of parallel process my $parallel_process = $queue_size; if ($job_per_file > 1) { $parallel_process = $job_per_file; } # Store total size of the log files foreach my $logfile ( @given_log_files ) { $global_totalsize += &get_log_file($logfile); } # Open a pipe for interprocess communication my $reader = new IO::Handle; my $writer = new IO::Handle; $pipe = IO::Pipe->new($reader, $writer); $writer->autoflush(1); # Fork the logger process if ($progress) { spawn sub { &multiprocess_progressbar($global_totalsize); }; } # Parse each log file following the multiprocess mode chosen (-j or -J) foreach my $logfile ( @given_log_files ) { while ($child_count >= $parallel_process) { my $kid = waitpid(-1, WNOHANG); if ($kid > 0) { $child_count--; delete $RUNNING_PIDS{$kid}; } sleep(1); } # Get log format of the current file my $fmt = $format || 'stderr'; if ($logfile ne '-' && !$journalctl_cmd) { $fmt = &autodetect_format($logfile); $fmt ||= $format; &logmsg('DEBUG', "pgBadger will use log format $fmt to parse $logfile."); } else { &logmsg('DEBUG', "Can not autodetect log format, assuming $fmt."); } # Set regex to parse the log file $fmt = set_parser_regex($fmt); # Do not use split method with compressed files and stdin if ( ($queue_size > 1) && ($logfile !~ /\.(gz|bz2|zip|xz)$/i) && ($logfile ne '-') && (!$journalctl_cmd || ($logfile !~ /\Q$journalctl_cmd\E/)) ) { # Create multiple processes to parse one log file by chunks of data my @chunks = split_logfile($logfile, ($fmt eq 'pgbouncer') ? $pgb_saved_last_line{current_pos} : $saved_last_line{current_pos}); &logmsg('DEBUG', "The following boundaries will be used to parse file $logfile, " . join('|', @chunks)); for (my $i = 0; $i < $#chunks; $i++) { while ($child_count >= $parallel_process) { my $kid = waitpid(-1, WNOHANG); if ($kid > 0) { $child_count--; delete $RUNNING_PIDS{$kid}; } sleep(1); } localdie("FATAL: Abort signal received when processing to next chunk\n") if ($interrupt == 2); last if ($interrupt); push(@tempfiles, [ tempfile('tmp_pgbadgerXXXX', SUFFIX => '.bin', DIR => $TMP_DIR, UNLINK => 1 ) ]); spawn sub { &process_file($logfile, $fmt, $tempfiles[-1]->[0], $chunks[$i], $chunks[$i+1], $i); }; $child_count++; } } else { # Start parsing one file per parallel process push(@tempfiles, [ tempfile('tmp_pgbadgerXXXX', SUFFIX => '.bin', DIR => $TMP_DIR, UNLINK => 1 ) ]); spawn sub { &process_file($logfile, $fmt, $tempfiles[-1]->[0], ($fmt eq 'pgbouncer') ? 
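# Illustrative sketch (not part of the original source): with -j N each
# uncompressed log file is cut by split_logfile() into a list of byte
# boundaries which the loop above consumes pairwise, one (start, stop) pair
# per process_file() worker. How the pairs derive from the boundary list
# (names are illustrative):
sub example_chunk_pairs
{
	my @chunks = @_;    # e.g. (0, 1000, 2000, 3000) for a 3000 byte file and -j 3
	my @pairs = ();
	for (my $i = 0; $i < $#chunks; $i++) {
		push(@pairs, [$chunks[$i], $chunks[$i + 1]]);
	}
	return @pairs;      # ([0,1000], [1000,2000], [2000,3000])
}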
$pgb_saved_last_line{current_pos} : $saved_last_line{current_pos}); }; $child_count++; } localdie("FATAL: Abort signal received when processing next file\n") if ($interrupt == 2); last if ($interrupt); } my $minproc = 1; $minproc = 0 if (!$progress); # Wait for all child processes to localdie except for the logger while (scalar keys %RUNNING_PIDS > $minproc) { my $kid = waitpid(-1, WNOHANG); if ($kid > 0) { delete $RUNNING_PIDS{$kid}; } sleep(1); } # Terminate the process logger foreach my $k (keys %RUNNING_PIDS) { kill('USR1', $k); %RUNNING_PIDS = (); } # Clear previous statistics &init_stats_vars(); # Load all data gathered by all the different processes foreach my $f (@tempfiles) { next if (!-e "$f->[1]" || -z "$f->[1]"); my $fht = new IO::File; $fht->open("< $f->[1]") or localdie("FATAL: can't open temp file $f->[1], $!\n"); &load_stats($fht); $fht->close(); } } else { # Multiprocessing disabled, parse log files one by one foreach my $logfile ( @given_log_files ) { # Get log format of the current file my $fmt = $format || 'stderr'; if (!$journalctl_cmd) { $fmt = &autodetect_format($logfile); $fmt ||= $format; &logmsg('DEBUG', "pgBadger will use log format $fmt to parse $logfile."); } else { &logmsg('DEBUG', "Can not autodetect log format, assuming $fmt."); } # Set regex to parse the log file $fmt = set_parser_regex($fmt); last if (&process_file($logfile, $fmt, '', ($fmt eq 'pgbouncer') ? $pgb_saved_last_line{current_pos} : $saved_last_line{current_pos})); } } # Get last line parsed from all process if ($last_parsed) { if (open(IN, "$tmp_last_parsed") ) { while (my $line = ) { chomp($line); $line =~ s/\r//; my ($d, $p, $l, @o) = split(/\t/, $line); if ($d ne 'pgbouncer') { if (!$last_line{datetime} || ($d gt $last_line{datetime})) { $last_line{datetime} = $d; $last_line{orig} = $l; $last_line{current_pos} = $p; } } else { $d = $p; $p = $l; $l = join("\t", @o); if (!$pgb_last_line{datetime} || ($d gt $pgb_last_line{datetime})) { $pgb_last_line{datetime} = $d; $pgb_last_line{orig} = $l; $pgb_last_line{current_pos} = $p; } } } close(IN); } unlink("$tmp_last_parsed"); } # Save last line parsed if ($last_parsed && ($last_line{datetime} || $pgb_last_line{datetime}) && ($last_line{orig} || $pgb_last_line{orig}) ) { if (open(OUT, ">$last_parsed")) { if ($last_line{datetime}) { $last_line{current_pos} ||= 0; print OUT "$last_line{datetime}\t$last_line{current_pos}\t$last_line{orig}\n"; } elsif ($saved_last_line{datetime}) { $saved_last_line{current_pos} ||= 0; print OUT "$saved_last_line{datetime}\t$saved_last_line{current_pos}\t$saved_last_line{orig}\n"; } if ($pgb_last_line{datetime}) { $pgb_last_line{current_pos} ||= 0; print OUT "pgbouncer\t$pgb_last_line{datetime}\t$pgb_last_line{current_pos}\t$pgb_last_line{orig}\n"; } elsif ($pgb_saved_last_line{datetime}) { $pgb_saved_last_line{current_pos} ||= 0; print OUT "pgbouncer\t$pgb_saved_last_line{datetime}\t$pgb_saved_last_line{current_pos}\t$pgb_saved_last_line{orig}\n"; } close(OUT); } else { &logmsg('ERROR', "can't save last parsed line into $last_parsed, $!"); } } if ($terminate) { unlink("$PID_DIR/pgbadger.pid"); exit 2; } my $t1 = Benchmark->new; my $td = timediff($t1, $t0); &logmsg('DEBUG', "the log statistics gathering took:" . 
timestr($td)); if (!$incremental && ($#given_log_files >= 0) ) { &logmsg('LOG', "Ok, generating $extension report..."); if ($extension ne 'tsung') { $fh = new IO::File ">$outfile"; if (not defined $fh) { localdie("FATAL: can't write to $outfile, $!\n"); } if (($extension eq 'text') || ($extension eq 'txt')) { if ($error_only) { &dump_error_as_text(); } else { &dump_as_text(); } } elsif ($extension eq 'json') { if ($error_only) { &dump_error_as_json(); } else { &dump_as_json(); } } elsif ($extension eq 'binary') { &dump_as_binary($fh); } else { # Create instance to prettify SQL query if (!$noprettify) { $sql_prettified = SQL::Beautify->new(keywords => \@beautify_pg_keywords); } &dump_as_html('.'); } $fh->close; } else { # Open filehandle $fh = new IO::File ">>$outfile"; if (not defined $fh) { localdie("FATAL: can't write to $outfile, $!\n"); } print $fh "\n\n"; $fh->close(); } } elsif (!$incremental || !$noreport) { # Look for directory where report must be generated my @build_directories = (); if (-e "$last_parsed.tmp") { if (open(IN, "$last_parsed.tmp")) { while (my $l = ) { chomp($l); $l =~ s/\r//; push(@build_directories, $l) if (!grep(/^$l$/, @build_directories)); } close(IN); unlink("$last_parsed.tmp"); } else { &logmsg('ERROR', "can't read file $last_parsed.tmp, $!"); } } else { &logmsg('DEBUG', "no new entries in your log(s) since last run."); } &build_incremental_reports(@build_directories); } my $t2 = Benchmark->new; $td = timediff($t2, $t1); &logmsg('DEBUG', "building reports took: " . timestr($td)); $td = timediff($t2, $t0); &logmsg('DEBUG', "the total execution time took: " . timestr($td)); # Remove pidfile and temporary file unlink("$PID_DIR/pgbadger.pid"); unlink("$last_parsed.tmp") if (-e "$last_parsed.tmp"); exit 0; #------------------------------------------------------------------------------- # Show pgBadger command line usage sub usage { print qq{ Usage: pgbadger [options] logfile [...] PostgreSQL log analyzer with fully detailed reports and graphs. Arguments: logfile can be a single log file, a list of files, or a shell command returning a list of files. If you want to pass log content from stdin use - as filename. Note that input from stdin will not work with csvlog. Options: -a | --average minutes : number of minutes to build the average graphs of queries and connections. Default 5 minutes. -A | --histo-average min: number of minutes to build the histogram graphs of queries. Default 60 minutes. -b | --begin datetime : start date/time for the data to be parsed in log. -B | --bar-graph : use bar graph instead of line by default. -c | --dbclient host : only report on entries for the given client host. -C | --nocomment : remove comments like /* ... */ from queries. -d | --dbname database : only report on entries for the given database. -D | --dns-resolv : client ip addresses are replaced by their DNS name. Be warned that this can really slow down pgBadger. -e | --end datetime : end date/time for the data to be parsed in log. -f | --format logtype : possible values: syslog, syslog2, stderr, csv and pgbouncer. Use this option when pgBadger is not able to auto-detect the log format Default: stderr. -G | --nograph : disable graphs on HTML output. Enabled by default. -h | --help : show this message and exit. -i | --ident name : programname used as syslog ident. Default: postgres -I | --incremental : use incremental mode, reports will be generated by days in a separate directory, --outdir must be set. -j | --jobs number : number of jobs to run at same time. 
Default is 1, run as single process. -J | --Jobs number : number of log file to parse in parallel. Default is 1, run as single process. -l | --last-parsed file: allow incremental log parsing by registering the last datetime and line parsed. Useful if you want to watch errors since last run or if you want one report per day with a log rotated each week. -L | --logfile-list file:file containing a list of log file to parse. -m | --maxlength size : maximum length of a query, it will be restricted to the given size. Default: no truncate -M | --no-multiline : do not collect multiline statement to avoid garbage especially on errors that generate a huge report. -n | --nohighlight : disable SQL code highlighting. -N | --appname name : only report on entries for given application name -o | --outfile filename: define the filename for the output. Default depends on the output format: out.html, out.txt, out.bin, out.json or out.tsung. With module JSON::XS installed, you can output file in JSON format either. To dump output to stdout use - as filename. -O | --outdir path : directory where out file must be saved. -p | --prefix string : the value of your custom log_line_prefix as defined in your postgresql.conf. Only use it if you aren't using one of the standard prefixes specified in the pgBadger documentation, such as if your prefix includes additional variables like client ip or application name. See examples below. -P | --no-prettify : disable SQL queries prettify formatter. -q | --quiet : don't print anything to stdout, not even a progress bar. -r | --remote-host ip : set the host where to execute the cat command on remote logfile to parse locally the file. -R | --retention N : number of weeks to keep in incremental mode. Default to 0, disabled. Used to set the number of weeks to keep in output directory. Older weeks and days directory are automatically removed. -s | --sample number : number of query samples to store. Default: 3. -S | --select-only : only report SELECT queries. -t | --top number : number of queries to store/display. Default: 20. -T | --title string : change title of the HTML page report. -u | --dbuser username : only report on entries for the given user. -U | --exclude-user username : exclude entries for the specified user from report. -v | --verbose : enable verbose or debug mode. Disabled by default. -V | --version : show pgBadger version and exit. -w | --watch-mode : only report errors just like logwatch could do. -x | --extension : output format. Values: text, html, bin, json or tsung. Default: html -X | --extra-files : in incremental mode allow pgBadger to write CSS and JS files in the output directory as separate files. -z | --zcat exec_path : set the full path to the zcat program. Use it if zcat or bzcat or unzip is not in your path. -Z | --timezone +/-XX : Set the number of hours from GMT of the timezone. Use this to adjust date/time in JavaScript graphs. --pie-limit num : pie data lower than num% will show a sum instead. --exclude-query regex : any query matching the given regex will be excluded from the report. For example: "^(VACUUM|COMMIT)" You can use this option multiple times. --exclude-file filename: path of the file which contains all the regex to use to exclude queries from the report. One regex per line. --include-query regex : any query that does not match the given regex will be excluded from the report. You can use this option multiple times. For example: "(tbl1|tbl2)". 
--include-file filename: path of the file which contains all the regex of the queries to include from the report. One regex per line. --disable-error : do not generate error report. --disable-hourly : do not generate hourly report. --disable-type : do not generate report of queries by type, database or user. --disable-query : do not generate query reports (slowest, most frequent, queries by users, by database, ...). --disable-session : do not generate session report. --disable-connection : do not generate connection report. --disable-lock : do not generate lock report. --disable-temporary : do not generate temporary report. --disable-checkpoint : do not generate checkpoint/restartpoint report. --disable-autovacuum : do not generate autovacuum report. --charset : used to set the HTML charset to be used. Default: utf-8. --csv-separator : used to set the CSV field separator, default: , --exclude-time regex : any timestamp matching the given regex will be excluded from the report. Example: "2013-04-12 .*" You can use this option multiple times. --exclude-appname name : exclude entries for the specified application name from report. Example: "pg_dump". --exclude-line regex : pgBadger will start to exclude any log entry that will match the given regex. Can be used multiple time. --anonymize : obscure all literals in queries, useful to hide confidential data. --noreport : prevent pgBadger to create reports in incremental mode. --log-duration : force pgBadger to associate log entries generated by both log_duration = on and log_statement = 'all' --enable-checksum : used to add a md5 sum under each query report. --journalctl command : command to use to replace PostgreSQL logfile by a call to journalctl. Basically it might be: journalctl -u postgresql-9.5 --pid-dir dirpath : set the path of the directory where the pid file will be written to be able to run two pgBadger at the same time. --rebuild : used to rebuild all html reports in incremental output directories where there is binary data files. --pgbouncer-only : only show PgBouncer related menu in the header. --start-monday : in incremental mode, calendar's weeks start on sunday. Use this option to start on monday. --normalized-only : only dump all normalized query to out.txt pgBadger is able to parse a remote log file using a passwordless ssh connection. Use the -r or --remote-host to set the host ip address or hostname. There's also some additional options to fully control the ssh connection. --ssh-program ssh path to the ssh program to use. Default: ssh. --ssh-user username connection login name. Default to running user. --ssh-identity file path to the identity file to use. --ssh-timeout second timeout to ssh connection failure. Default 10 secs. --ssh-option options list of -o options to use for the ssh connection. 
Options always used: -o ConnectTimeout=\$ssh_timeout -o PreferredAuthentications=hostbased,publickey Examples: pgbadger /var/log/postgresql.log pgbadger /var/log/postgres.log.2.gz /var/log/postgres.log.1.gz \ /var/log/postgres.log pgbadger /var/log/postgresql/postgresql-2012-05-* pgbadger --exclude-query="^(COPY|COMMIT)" /var/log/postgresql.log pgbadger -b "2012-06-25 10:56:11" -e "2012-06-25 10:59:11" \ /var/log/postgresql.log cat /var/log/postgres.log | pgbadger - # Log prefix with stderr log output perl pgbadger --prefix '%t [%p]: [%l-1] user=%u,db=%d,client=%h' \ /pglog/postgresql-2012-08-21* perl pgbadger --prefix '%m %u@%d %p %r %a : ' /pglog/postgresql.log # Log line prefix with syslog log output perl pgbadger --prefix 'user=%u,db=%d,client=%h,appname=%a' \ /pglog/postgresql-2012-08-21* # Use my 8 CPUs to parse my 10GB file faster, much faster perl pgbadger -j 8 /pglog/postgresql-9.1-main.log Generate Tsung sessions XML file with select queries only: perl pgbadger -S -o sessions.tsung --prefix '%t [%p]: [%l-1] user=%u,db=%d ' /pglog/postgresql-9.1.log Reporting errors every week by cron job: 30 23 * * 1 /usr/bin/pgbadger -q -w /var/log/postgresql.log -o /var/reports/pg_errors.html Generate report every week using incremental behavior: 0 4 * * 1 /usr/bin/pgbadger -q `find /var/log/ -mtime -7 -name "postgresql.log*"` \ -o /var/reports/pg_errors-`date +%F`.html -l /var/reports/pgbadger_incremental_file.dat This supposes that your log file and HTML report are also rotated every week. Or better, use the auto-generated incremental reports: 0 4 * * * /usr/bin/pgbadger -I -q /var/log/postgresql/postgresql.log.1 \ -O /var/www/pg_reports/ will generate a report per day and per week. In incremental mode, you can also specify the number of week to keep in the reports: /usr/bin/pgbadger --retention 2 -I -q /var/log/postgresql/postgresql.log.1 \ -O /var/www/pg_reports/ If you have a pg_dump at 23:00 and 13:00 each day during half an hour, you can use pgBadger as follow to exclude these period from the report: pgbadger --exclude-time "2013-09-.* (23|13):.*" postgresql.log This will help avoid having COPY statements, as generated by pg_dump, on top of the list of slowest queries. You can also use --exclude-appname "pg_dump" to solve this problem in a simpler way. You can also parse journalctl output just as if it was a log file: pgbadger --journalctl 'journalctl -u postgresql-9.5' or worst, call it from a remote host: pgbadger -r 192.168.1.159 --journalctl 'journalctl -u postgresql-9.5' you don't need to specify any log file at command line, but if you have other PostgreSQL log file to parse, you can add them as usual. To rebuild all incremental html reports after, proceed as follow: rm /path/to/reports/*.js rm /path/to/reports/*.css pgbadger -X -I -O /path/to/reports/ --rebuild it will also update all resource files (JS and CSS). }; exit 0; } # Get inbounds of query times histogram sub get_hist_inbound { my ($duration, @histogram) = @_; for (my $i = 0; $i <= $#histogram; $i++) { return $histogram[$i-1] if ($histogram[$i] > $duration); } return -1; } # Compile custom log line prefix prefix sub set_parser_regex { my $fmt = shift; @prefix_params = (); if ($fmt eq 'pgbouncer') { $pgbouncer_log_format = qr/^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\.\d+(?: [A-Z\+\-\d]{3,6})? 
(\d+) ([^\s]+) (.\-0x[0-9a-f\.]*): ([0-9a-zA-Z\_\[\]\-\.]*)\/([0-9a-zA-Z\_\[\]\-\.]*)\@([a-zA-Z0-9\-\.]+|\[local\]|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[0-9a-fA-F:]+)?[:\d]* (.*)/; @pgb_prefix_params = ('t_timestamp', 't_pid', 't_loglevel', 't_session_id', 't_dbname', 't_dbuser', 't_client', 't_query'); $pgbouncer_log_parse1 = qr/^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\.\d+(?: [A-Z\+\-\d]{3,6})? (\d+) ([^\s]+) (.*)/; @pgb_prefix_parse1 = ('t_timestamp', 't_pid', 't_loglevel', 't_query'); $pgbouncer_log_parse2 = qr/^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\.\d+(?: [A-Z\+\-\d]{3,6})? (\d+) ([^\s]+) (.\-0x[0-9a-f\.]*): ([0-9a-zA-Z\_\[\]\-\.]*)\/([0-9a-zA-Z\_\[\]\-\.]*)\@([a-zA-Z0-9\-\.]+|\[local\]|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[0-9a-fA-F:]+)?[:\d]* (.*)/; @pgb_prefix_parse2 = ('t_timestamp', 't_pid', 't_loglevel', 't_session_id', 't_dbname', 't_dbuser', 't_client', 't_query'); } elsif ($log_line_prefix) { # Build parameters name that will be extracted from the prefix regexp my $llp = ''; ($llp, @prefix_params) = &build_log_line_prefix_regex($log_line_prefix); if ($fmt eq 'syslog') { $llp = '^(...)\s+(\d+)\s(\d+):(\d+):(\d+)(?:\s[^\s]+)?\s([^\s]+)\s([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?\s\[(\d+)\-\d+\]\s*' . $llp . '\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(?:[0-9A-Z]{5}:\s+)?(.*)'; $compiled_prefix = qr/$llp/; unshift(@prefix_params, 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_session_line'); push(@prefix_params, 't_loglevel', 't_query'); $other_syslog_line = qr/^(...)\s+(\d+)\s(\d+):(\d+):(\d+)(?:\s[^\s]+)?\s([^\s]+)\s([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?\s\[(\d+)\-\d+\]\s*(.*)/; } elsif ($fmt eq 'syslog2') { $fmt = 'syslog'; $llp = '^(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)(?:.[^\s]+)?\s([^\s]+)\s(?:[^\s]+\s)?(?:[^\s]+\s)?([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?(?:\s\[(\d+)\-\d+\])?\s*' . $llp . '\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(?:[0-9A-Z]{5}:\s+)?(.*)'; $compiled_prefix = qr/$llp/; unshift(@prefix_params, 't_year', 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_session_line'); push(@prefix_params, 't_loglevel', 't_query'); $other_syslog_line = qr/^(\d+-\d+)-(\d+)T(\d+):(\d+):(\d+)(?:.[^\s]+)?\s([^\s]+)\s(?:[^\s]+\s)?(?:[^\s]+\s)?([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?(?:\s\[(\d+)\-\d+\])?\s*(.*)/; } elsif ($fmt eq 'stderr') { $llp = '^' . $llp . 
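# Illustrative sketch (not part of the original source): a hypothetical
# PgBouncer log line of the shape the two regexes above are built for, with
# the fields they capture.
my $example_pgbouncer_line =
	q{2017-07-27 10:15:42.123 12345 LOG C-0x1a2b3c: mydb/myuser@192.168.1.10:45678 login attempt: db=mydb user=myuser};
# $pgbouncer_log_parse1 captures: t_timestamp='2017-07-27 10:15:42',
# t_pid=12345, t_loglevel='LOG' and t_query holding the rest of the line.
# $pgbouncer_log_parse2 additionally splits the session part into
# t_session_id='C-0x1a2b3c', t_dbname='mydb', t_dbuser='myuser',
# t_client='192.168.1.10' (the port is dropped) and
# t_query='login attempt: db=mydb user=myuser'.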
'\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(?:[0-9A-Z]{5}:\s+)?(.*)'; $compiled_prefix = qr/$llp/; push(@prefix_params, 't_loglevel', 't_query'); } } elsif ($fmt eq 'syslog') { $compiled_prefix = qr/^(...)\s+(\d+)\s(\d+):(\d+):(\d+)(?:\s[^\s]+)?\s([^\s]+)\s([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?\s\[(\d+)\-\d+\]\s*(.*?)\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(?:[0-9A-Z]{5}:\s+)?(.*)/; push(@prefix_params, 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_session_line', 't_logprefix', 't_loglevel', 't_query'); $other_syslog_line = qr/^(...)\s+(\d+)\s(\d+):(\d+):(\d+)(?:\s[^\s]+)?\s([^\s]+)\s([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?\s\[(\d+)\-\d+\]\s*(.*)/; } elsif ($fmt eq 'syslog2') { $fmt = 'syslog'; $compiled_prefix = qr/^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(?:.[^\s]+)?\s([^\s]+)\s(?:[^\s]+\s)?(?:[^\s]+\s)?([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?(?:\s\[(\d+)\-\d+\])?\s*(.*?)\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(?:[0-9A-Z]{5}:\s+)?(.*)/; push(@prefix_params, 't_year', 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_session_line', 't_logprefix', 't_loglevel', 't_query'); $other_syslog_line = qr/^(\d+-\d+)-(\d+)T(\d+):(\d+):(\d+)(?:.[^\s]+)?\s([^\s]+)\s(?:[^\s]+\s)?(?:[^\s]+\s)?([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?(?:\s\[(\d+)\-\d+\])?\s*(.*)/; } elsif ($fmt eq 'stderr') { $compiled_prefix = qr/^(\d{10}\.\d{3}|\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})[\.\d]*(?: [A-Z\+\-\d]{3,6})?\s\[(\d+)\]:\s\[(\d+)\-\d+\]\s*(.*?)\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(?:[0-9A-Z]{5}:\s+)?(.*)/; push(@prefix_params, 't_timestamp', 't_pid', 't_session_line', 't_logprefix', 't_loglevel', 't_query'); } elsif ($fmt eq 'default') { $fmt = 'stderr'; $compiled_prefix = qr/^(\d{10}\.\d{3}|\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})[\.\d]*(?: [A-Z\+\-\d]{3,6})?\s\[(\d+)\]\s(.*?)\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT|LOCATION):\s+(?:[0-9A-Z]{5}:\s+)?(.*)/; push(@prefix_params, 't_timestamp', 't_pid', 't_logprefix', 't_loglevel', 't_query'); } return $fmt; } sub check_regex { my ($pattern, $varname) = @_; eval {m/$pattern/i;}; if ($@) { localdie("FATAL: '$varname' invalid regex '$pattern', $!\n"); } } sub build_incremental_reports { my @build_directories = @_; my %weeks_directories = (); foreach $incr_date (sort @build_directories) { $last_incr_date = $incr_date; # Set the path to binary files my $bpath = $incr_date; $bpath =~ s/\-/\//g; $incr_date =~ /^(\d+)-(\d+)\-(\d+)$/; # Get the week number following the date my $wn = &get_week_number($1, $2, $3); $weeks_directories{$wn} = "$1-$2" if ($rebuild || !exists $weeks_directories{$wn}); # First clear previous stored statistics &init_stats_vars(); # Load all data gathered by all the different processes unless(opendir(DIR, "$outdir/$bpath")) { localdie("Error: can't opendir $outdir/$bpath: $!"); } my @mfiles = grep { !/^\./ && ($_ =~ /\.bin$/) } readdir(DIR); closedir DIR; foreach my $f (@mfiles) { my $fht = new IO::File; $fht->open("< $outdir/$bpath/$f") or localdie("FATAL: can't open file $outdir/$bpath/$f, $!\n"); &load_stats($fht); $fht->close(); } &logmsg('LOG', "Ok, generating HTML daily report into $outdir/$bpath/..."); $fh = new IO::File ">$outdir/$bpath/$outfile"; if (not defined $fh) { localdie("FATAL: can't write to $outdir/$bpath/$outfile, $!\n"); } # Create instance to prettify SQL query if (!$noprettify) { $sql_prettified = 
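# Illustrative sketch (not part of the original source): a hypothetical log
# line of the shape the built-in 'stderr' prefix regex above expects, matched
# here against a trimmed-down copy of that regex to show the capture order
# (the full regex also accepts epoch timestamps and an optional SQLSTATE
# code).
my $example_stderr_line =
	q{2017-07-27 10:15:42 CEST [12345]: [3-1] user=postgres,db=mydb LOG:  duration: 1.234 ms};
if ($example_stderr_line =~
	/^(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})(?: [A-Z]{3,6})?\s\[(\d+)\]:\s\[(\d+)-\d+\]\s*(.*?)\s*(LOG|WARNING|ERROR|FATAL|PANIC):\s+(.*)/)
{
	# ($1 .. $6) = (t_timestamp, t_pid, t_session_line, t_logprefix, t_loglevel, t_query)
	# = ('2017-07-27 10:15:42', 12345, 3, 'user=postgres,db=mydb', 'LOG', 'duration: 1.234 ms')
}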
SQL::Beautify->new(keywords => \@beautify_pg_keywords); } &dump_as_html('../../..'); $fh->close; } # Build a report per week foreach my $wn (sort { $a <=> $b } keys %weeks_directories) { &init_stats_vars(); # Get all days of the current week my @wdays = &get_wdays_per_month($wn - 1, $weeks_directories{$wn}); my $wdir = ''; # Load data per day foreach $incr_date (@wdays) { my $bpath = $incr_date; $bpath =~ s/\-/\//g; $incr_date =~ /^(\d+)\-(\d+)\-(\d+)$/; $wdir = "$1/week-$wn"; # Load all data gathered by all the differents processes if (-e "$outdir/$bpath") { unless(opendir(DIR, "$outdir/$bpath")) { localdie("Error: can't opendir $outdir/$bpath: $!"); } my @mfiles = grep { !/^\./ && ($_ =~ /\.bin$/) } readdir(DIR); closedir DIR; foreach my $f (@mfiles) { my $fht = new IO::File; $fht->open("< $outdir/$bpath/$f") or localdie("FATAL: can't open file $outdir/$bpath/$f, $!\n"); &load_stats($fht); $fht->close(); } } } &logmsg('LOG', "Ok, generating HTML weekly report into $outdir/$wdir/..."); if (!-d "$outdir/$wdir") { mkdir("$outdir/$wdir"); } $fh = new IO::File ">$outdir/$wdir/$outfile"; if (not defined $fh) { localdie("FATAL: can't write to $outdir/$wdir/$outfile, $!\n"); } # Create instance to prettify SQL query if (!$noprettify) { $sql_prettified = SQL::Beautify->new(keywords => \@beautify_pg_keywords); } &dump_as_html('../..'); $fh->close; } &logmsg('LOG', "Ok, generating global index to access incremental reports..."); $fh = new IO::File ">$outdir/index.html"; if (not defined $fh) { localdie("FATAL: can't write to $outdir/index.html, $!\n"); } my $date = localtime(time); my @tmpjscode = @jscode; map { s/EDIT_URI/\./; } @tmpjscode; my $local_title = 'Global Index on incremental reports'; if ($report_title) { $local_title = 'Global Index - ' . $report_title; } print $fh qq{ pgBadger :: $local_title @tmpjscode


}; # get year directories unless(opendir(DIR, "$outdir")) { localdie("Error: can't opendir $outdir: $!"); } my @dyears = grep { !/^\./ && /^\d{4}$/ } readdir(DIR); closedir DIR; foreach my $y (sort { $b <=> $a } @dyears) { print $fh qq{

Year $y

}; # foreach year directory look for week directories unless(opendir(DIR, "$outdir/$y")) { localdie("Error: can't opendir $outdir/$y: $!"); } my @ymonths = grep { /^\d{2}$/ } readdir(DIR); closedir DIR; my $i = 1; foreach my $m (sort {$a <=> $b } @ymonths) { print $fh "<td>", &get_calendar($y, $m), "</td>\n"; print $fh "</tr>\n<tr>\n" if ( ($i%4) == 0 ); $i++; } print $fh qq{
}; } print $fh qq{
}; $fh->close; } sub cleanup_directory { my ($dir, $remove_dir) = @_; unless(opendir(DIR, "$dir")) { localdie("Error: can't opendir $dir: $!"); } my @todel = grep { !/^\./ } readdir(DIR); closedir DIR; map { unlink("$dir/$_"); } @todel; rmdir("$dir") if ($remove_dir); } sub write_resources { # Write resource file to report directory or return resources in and array of lines my $rscfh; my @contents = (); my $endfile = ''; my $file = ''; my $major_version = $VERSION; $major_version =~ s/\..*//; while (my $l = ) { if ($l =~ /^WRFILE: ([^\s]+)/) { $file = $1; if (!$extra_files) { if ($#contents > 0) { push(@contents, $endfile); } if ($file =~ /\.css$/i) { push(@contents, ""; } elsif ($file =~ /\.js$/i) { push(@contents, ""; } next; } $rscfh->close() if (defined $rscfh); if ($file =~ /\.css$/i) { push(@contents, "\n"); } elsif ($file =~ /\.js$/i) { push(@contents, "\n"); } if (!-e "$outdir/$major_version") { mkdir("$outdir/$major_version"); } if (!-e "$outdir/$major_version/$file") { $rscfh = new IO::File ">$outdir/$major_version/$file"; localdie("FATAL: can't write file $outdir/$major_version/$file\n") if (not defined $rscfh); } next; } if (!$extra_files) { push(@contents, $l); } else { $rscfh->print($l) if (defined $rscfh); } } $rscfh->close() if (defined $rscfh); # Return __DATA__ content if --extra-files is not used # or HTML links to resources files if (!$extra_files) { push(@contents, $endfile); } return @contents; } sub sort_by_week { my $curr = shift; my $next = shift; $a =~ /week\-(\d+)/; $curr = $1; $b =~ /week\-(\d+)/; $next = $1; return $next <=> $curr; } sub init_stats_vars { # Empty where statistics are stored %overall_stat = (); %pgb_overall_stat = (); %overall_checkpoint = (); @top_slowest = (); @top_tempfile_info = (); @top_cancelled_info = (); @top_locked_info = (); %normalyzed_info = (); %error_info = (); %pgb_error_info = (); %pgb_pool_info = (); %logs_type = (); %errors_code = (); %per_minute_info = (); %pgb_per_minute_info = (); %lock_info = (); %tempfile_info = (); %cancelled_info = (); %connection_info = (); %pgb_connection_info = (); %database_info = (); %application_info = (); %session_info = (); %pgb_session_info = (); %conn_received = (); %checkpoint_info = (); %autovacuum_info = (); %autoanalyze_info = (); @graph_values = (); %cur_info = (); $nlines = 0; %tsung_session = (); } #### # Main function called per each parser process #### sub multiprocess_progressbar { my $totalsize = shift; &logmsg('DEBUG', "Starting progressbar writer process"); $0 = 'pgbadger logger'; # Terminate the process when we haven't read the complete file but must exit local $SIG{USR1} = sub { print STDERR "\n"; exit 1; }; my $timeout = 3; my $cursize = 0; my $nqueries = 0; my $nerrors = 0; my $last = 0; $pipe->reader(); while (my $r = <$pipe>) { chomp($r); my @infos = split(/\s+/, $r); last if ($infos[0] eq 'QUIT'); $cursize += $infos[0]; $nqueries += $infos[1]; $nerrors += $infos[2]; $cursize = $totalsize if ($cursize > $totalsize); print STDERR &progress_bar($cursize, $totalsize, 25, '=', $nqueries, $nerrors); } print STDERR "\n"; exit 0; } sub update_progress_bar { my ($tmpoutfile, $nlines, $stop_offset, $totalsize, $cursize, $old_queries_count, $old_errors_count) = @_; if (!$tmpoutfile) { if ($progress && (($nlines % $NUMPROGRESS) == 0)) { if ($totalsize) { print STDERR &progress_bar($$cursize, $stop_offset || $totalsize, 25, '='); } else { print STDERR "."; } } } else { if ($progress && (($nlines % $NUMPROGRESS) == 0)) { $pipe->print("$$cursize " . 
($overall_stat{'queries_number'} - $$old_queries_count) . " " . (($overall_stat{'errors_number'}+$pgb_overall_stat{'errors_number'}) - $$old_errors_count) . "\n"); $$old_queries_count = $overall_stat{'queries_number'}; $$old_errors_count = $overall_stat{'errors_number'}+$pgb_overall_stat{'errors_number'}; $$cursize = 0; } } } #### # Main function called per each parser process #### sub process_file { my ($logfile, $fmt, $tmpoutfile, $start_offset, $stop_offset, $chunk_pos) = @_; my $old_queries_count = 0; my $old_errors_count = 0; my $getout = 0; $start_offset ||= 0; $0 = 'pgbadger parser'; &init_stats_vars() if ($tmpoutfile); if (!$remote_host) { &logmsg('DEBUG', "Starting to parse log file: $logfile"); } else { &logmsg('DEBUG', "Starting to parse remote log file: $remote_host:$logfile"); } local $SIG{INT} = sub { print STDERR "Received SIGINT abort parsing...\n"; unlink("$PID_DIR/pgbadger.pid"); $terminate = 1; }; local $SIG{TERM} = sub { print STDERR "Received SIGTERM abort parsing...\n"; unlink("$PID_DIR/pgbadger.pid"); $terminate = 1; }; my $curdate = localtime(time); $pipe->writer() if (defined $pipe); # Syslog does not have year information, so take care of year overlapping my ($gsec, $gmin, $ghour, $gmday, $gmon, $gyear, $gwday, $gyday, $gisdst) = localtime(time); $gyear += 1900; my $CURRENT_DATE = $gyear . sprintf("%02d", $gmon + 1) . sprintf("%02d", $gmday); my $cursize = 0; # Get file handle and size of the file my ($lfile, $totalsize) = &get_log_file($logfile); # Reset the start position if file is smaller that the current start offset if ($start_offset > $totalsize) { &logmsg('DEBUG', "Starting offset $start_offset is greater than total size $totalsize for file $logfile"); &logmsg('DEBUG', "Reverting start offset $start_offset to 0 for file $logfile, stoppping offset is " . ($stop_offset || $totalsize)); $start_offset = 0 ; } # Check if the first date in the log are after the last date saved if (($fmt ne 'binary') && ($fmt ne 'csv')) { if ($start_offset && !$chunk_pos) { my ($retcode, $msg) = check_file_changed($logfile, $fmt, ($fmt eq 'pgbouncer') ? $pgb_saved_last_line{datetime} : $saved_last_line{datetime}, $start_offset, 1); if ($retcode) { &logmsg('DEBUG', "This file should be parsed from the beginning: $logfile, $msg"); &logmsg('DEBUG', "Reverting start offset $start_offset to 0 for file $logfile, stoppping offset is " . ($stop_offset || $totalsize)); $start_offset = 0; } else { &logmsg('DEBUG', "This might not be parsed: $logfile, $msg"); } $cursize = $start_offset; } } else { $start_offset = 0; $stop_offset = 0; } if ($stop_offset > 0) { $totalsize = $stop_offset - $start_offset; } my $current_offset = $start_offset || 0; # Forward the progress bar to the starting point in MP mode #$cursize = $start_offset if ($chunk_pos == 0); if (!$remote_host) { &logmsg('DEBUG', "Starting reading file $logfile..."); } else { &logmsg('DEBUG', "Starting reading file $remote_host:$logfile..."); } # Parse pgbouncer logfile if ($fmt eq 'pgbouncer') { my $time_pattern = qr/(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})/; my $cur_pid = ''; my @matches = (); my $has_exclusion = 0; if ($#exclude_line >= 0) { $has_exclusion = 1; } &logmsg('DEBUG', "Start parsing at offset $start_offset of file $logfile to " . 
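# Illustrative sketch (not part of the original source): the worker side above
# writes one space separated line per progress tick on $pipe,
# "<bytes read> <new queries> <new errors>", and multiprocess_progressbar()
# sums them until it reads the literal "QUIT". A standalone reader loop for
# that protocol (names are illustrative):
sub example_progress_reader
{
	my ($reader, $totalsize) = @_;
	my ($cursize, $nqueries, $nerrors) = (0, 0, 0);
	while (my $msg = <$reader>) {
		chomp($msg);
		my @infos = split(/\s+/, $msg);
		last if ($infos[0] eq 'QUIT');
		$cursize  += $infos[0];
		$nqueries += $infos[1];
		$nerrors  += $infos[2];
		$cursize = $totalsize if ($cursize > $totalsize);
	}
	return ($cursize, $nqueries, $nerrors);
}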
($stop_offset || $totalsize)); if ($start_offset) { # Move to the starting offset position in file $lfile->seek($start_offset, 0); } while (my $line = <$lfile>) { # We received a signal last if ($terminate); # Get current size/offset in the log file $cursize += length($line); $current_offset += length($line); # Replace CR/LF by LF $line =~ s/\r//; # Start to exclude from parsing any desired lines if ($has_exclusion >= 0) { # Log line matches the excluded regex map { next if ($line =~ /$_/is); } @exclude_line; } chomp($line); $nlines++; next if (!$line); &update_progress_bar($tmpoutfile, $nlines, $stop_offset, $totalsize, \$cursize, \$old_queries_count, \$old_errors_count); %prefix_vars = (); @matches = ($line =~ $pgbouncer_log_parse1); if ($#matches >= 0) { for (my $i = 0 ; $i <= $#pgb_prefix_parse1 ; $i++) { $prefix_vars{$pgb_prefix_parse1[$i]} = $matches[$i]; } # Get time detailed information ($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}) = ($prefix_vars{'t_timestamp'} =~ $time_pattern); # Skip unwanted lines my $res = &skip_unwanted_line(); next if ($res == 1); if ($res == -1) { &update_progress_bar($tmpoutfile, $nlines, $stop_offset, $totalsize, \$cursize, \$old_queries_count, \$old_errors_count); $getout = 2; last; } # Jump to the last line parsed if required next if (!&check_incremental_position($fmt, $prefix_vars{'t_timestamp'}, $line)); # Extract other information from the line @matches = ($line =~ $pgbouncer_log_parse2); if ($#matches >= 0) { for (my $i = 0 ; $i <= $#pgb_prefix_parse2 ; $i++) { $prefix_vars{$pgb_prefix_parse2[$i]} = $matches[$i]; } $prefix_vars{'t_client'} = _gethostbyaddr($prefix_vars{'t_client'}) if ($dns_resolv && $prefix_vars{'t_client'}); } else { # pgBouncer Statistics appears each minutes in the log if ($prefix_vars{'t_query'} =~ /Stats: (\d+) req\/s, in (\d+) b\/s, out (\d+) b\/s,query (\d+) us/) { $prefix_vars{'t_loglevel'} = 'STATS'; $prefix_vars{'t_req/s'} = $1; $prefix_vars{'t_inbytes/s'} = $2; $prefix_vars{'t_outbytes/s'} = $3; $prefix_vars{'t_avgduration'} = $4; } } # Store the current timestamp of the log line &store_current_timestamp($prefix_vars{'t_timestamp'}); # Check if the log line should be excluded from the report if (&validate_log_line($prefix_vars{'t_pid'})) { $prefix_vars{'t_host'} = 'stderr'; # this unused variable is used to store format information when log format is not syslog # Process the log line &parse_pgbouncer($fmt); } } else { # unknown format &logmsg('DEBUG', "Unknown pgbouncer line format: $line"); } last if (($stop_offset > 0) && ($current_offset >= $stop_offset)); } if ($last_parsed) { $pgb_last_line{current_pos} = $current_offset; } # Parse PostgreSQL log file with CSV format } elsif ($fmt eq 'csv') { require Text::CSV_XS; my $csv = Text::CSV_XS->new( { binary => 1, eol => $/, sep_char => $csv_sep_char, allow_loose_quotes => 1, } ); # Parse csvlog lines CSVLOOP: while (!$csv->eof()) { while (my $row = $csv->getline($lfile)) { $row =~ s/\r//; # We received a signal last CSVLOOP if ($terminate); # Number of columns in csvlog (22 before 9.0 and 23 from 9.0 to current) next if ( ($#{$row} != 22) && ($#{$row} != 21) ); # Set progress statistics $cursize += length(join(',', @$row)); $nlines++; &update_progress_bar($tmpoutfile, $nlines, $stop_offset, $totalsize, \$cursize, \$old_queries_count, \$old_errors_count); next if ( ($row->[11] !~ $parse_regex) || ($row->[11] eq 'LOCATION')); # Extract the date if ($row->[0] =~ 
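# Illustrative sketch (not part of the original source): the per-minute
# PgBouncer statistics line recognised above and the four values taken from
# it; the sample line is hypothetical but follows the pattern matched by the
# code.
my $example_pgb_stats = q{Stats: 42 req/s, in 1024 b/s, out 2048 b/s,query 380 us};
if ($example_pgb_stats =~ /Stats: (\d+) req\/s, in (\d+) b\/s, out (\d+) b\/s,query (\d+) us/) {
	# $1 = requests/s, $2 = inbound bytes/s, $3 = outbound bytes/s,
	# $4 = average query duration in microseconds
	# (stored above as t_req/s, t_inbytes/s, t_outbytes/s and t_avgduration)
}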
m/^(\d+)-(\d+)-(\d+)\s+(\d+):(\d+):(\d+)\.(\d+)/) { # Remove newline characters from queries map { s/[\r\n]+/ /gs; } @$row; my $milli = $7 || 0; ($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}) = ($1, $2, $3, $4, $5, $6); $prefix_vars{'t_timestamp'} = "$1-$2-$3 $4:$5:$6"; # Skip unwanted lines my $res = &skip_unwanted_line(); next if ($res == 1); if ($res == -1) { &update_progress_bar($tmpoutfile, $nlines, $stop_offset, $totalsize, \$cursize, \$old_queries_count, \$old_errors_count); $getout = 2; last CSVLOOP; } # Jump to the last line parsed if required next if (!&check_incremental_position($fmt, $prefix_vars{'t_timestamp'}, join(',', @$row))); # Store the current timestamp of the log line &store_current_timestamp($prefix_vars{'t_timestamp'}); # Set query parameters as global variables $prefix_vars{'t_dbuser'} = $row->[1] || ''; $prefix_vars{'t_dbname'} = $row->[2] || ''; $prefix_vars{'t_appname'} = $row->[22] || ''; $prefix_vars{'t_client'} = $row->[4] || ''; $prefix_vars{'t_client'} =~ s/:.*//; $prefix_vars{'t_client'} = _gethostbyaddr($prefix_vars{'t_client'}) if ($dns_resolv); $prefix_vars{'t_host'} = 'csv'; # this unused variable is used to store format information when log format is not syslog $prefix_vars{'t_pid'} = $row->[3]; $prefix_vars{'t_session_line'} = $row->[5]; $prefix_vars{'t_session_line'} =~ s/\..*//; $prefix_vars{'t_loglevel'} = $row->[11]; $prefix_vars{'t_query'} = $row->[13]; # Set ERROR additional information $prefix_vars{'t_detail'} = $row->[14]; $prefix_vars{'t_hint'} = $row->[15]; $prefix_vars{'t_context'} = $row->[18]; $prefix_vars{'t_statement'} = $row->[19]; # Check if the log line should be excluded from the report if (&validate_log_line($prefix_vars{'t_pid'})) { # Parse the query now &parse_query($fmt); # The information can be saved immediately with csvlog &store_queries($prefix_vars{'t_pid'}); delete $cur_info{$prefix_vars{'t_pid'}}; } } } if (!$csv->eof()) { warn "WARNING: cannot use CSV on $logfile, " . $csv->error_diag() . " at line " . ($nlines+1), "\n"; print STDERR "DETAIL: " . $csv->error_input(), "\n" if ($csv->error_input()); print STDERR "reset CSV parser\n"; $csv->SetDiag(0); } } } elsif ($fmt eq 'binary') { &load_stats($lfile); } else { # Format is not CSV. my $time_pattern = qr/(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})/; my $cur_pid = ''; my @matches = (); my $goon = 0; my $has_exclusion = 0; if ($#exclude_line >= 0) { $has_exclusion = 1; } &logmsg('DEBUG', "Start parsing at offset $start_offset of file $logfile to " . 
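# Summary sketch (not part of the original source): the csvlog columns
# (0-based, names as in the PostgreSQL documentation) that the block above
# copies into %prefix_vars. Kept as a plain hash for reference only; it is
# not used by the parser itself.
my %example_csvlog_columns = (
	0  => 'log_time         -> t_timestamp / t_year .. t_sec',
	1  => 'user_name        -> t_dbuser',
	2  => 'database_name    -> t_dbname',
	3  => 'process_id       -> t_pid',
	4  => 'connection_from  -> t_client (port stripped)',
	5  => 'session_id       -> t_session_line (part after "." stripped)',
	11 => 'error_severity   -> t_loglevel',
	13 => 'message          -> t_query',
	14 => 'detail           -> t_detail',
	15 => 'hint             -> t_hint',
	18 => 'context          -> t_context',
	19 => 'query            -> t_statement',
	22 => 'application_name -> t_appname',
);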
($stop_offset || $totalsize)); if ($start_offset) { # Move to the starting offset position in file $lfile->seek($start_offset, 0); } while (my $line = <$lfile>) { # We received a signal last if ($terminate); # Get current size/offset in the log file $cursize += length($line); $current_offset += length($line); # Replace CR/LF by LF $line =~ s/\r//; # Start to exclude from parsing any desired lines if ($has_exclusion >= 0) { # Log line matches the excluded regex map { next if ($line =~ /$_/is); } @exclude_line; } chomp($line); $nlines++; next if (!$line); &update_progress_bar($tmpoutfile, $nlines, $stop_offset, $totalsize, \$cursize, \$old_queries_count, \$old_errors_count); %prefix_vars = (); # Parse syslog lines if ($fmt =~ /syslog/) { @matches = ($line =~ $compiled_prefix); if ($#matches >= 0) { for (my $i = 0 ; $i <= $#prefix_params ; $i++) { $prefix_vars{$prefix_params[$i]} = $matches[$i]; } # skip non postgresql lines next if ($prefix_vars{'t_ident'} ne $ident); # Skip location information next if ($prefix_vars{'t_loglevel'} eq 'LOCATION'); # Standard syslog format does not have year information, months are # three letters and days are not always with 2 digits. if ($prefix_vars{'t_month'} !~ /\d/) { $prefix_vars{'t_year'} = $gyear; $prefix_vars{'t_day'} = sprintf("%02d", $prefix_vars{'t_day'}); $prefix_vars{'t_month'} = $month_abbr{$prefix_vars{'t_month'}}; # Take care of year overlapping if ("$prefix_vars{'t_year'}$prefix_vars{'t_month'}$prefix_vars{'t_day'}" > $CURRENT_DATE) { $prefix_vars{'t_year'} = substr($CURRENT_DATE, 0, 4) - 1; } } $prefix_vars{'t_timestamp'} = "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} $prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; if ($prefix_vars{'t_hostport'} && !$prefix_vars{'t_client'}) { $prefix_vars{'t_client'} = $prefix_vars{'t_hostport'}; # Remove the port part $prefix_vars{'t_client'} =~ s/\(.*//; } # Skip unwanted lines my $res = &skip_unwanted_line(); next if ($res == 1); if ($res == -1) { &update_progress_bar($tmpoutfile, $nlines, $stop_offset, $totalsize, \$cursize, \$old_queries_count, \$old_errors_count); $getout = 2; last; } # Jump to the last line parsed if required next if (!&check_incremental_position($fmt, $prefix_vars{'t_timestamp'}, $line)); $goon = 1; $prefix_vars{'t_client'} = _gethostbyaddr($prefix_vars{'t_client'}) if ($dns_resolv); # Store the current timestamp of the log line &store_current_timestamp($prefix_vars{'t_timestamp'}); # Extract information from log line prefix if (!$log_line_prefix) { &parse_log_prefix($prefix_vars{'t_logprefix'}); } # Check if the log line should be excluded from the report if (&validate_log_line($prefix_vars{'t_pid'})) { # The information can be saved when we are switching to a new main message if ($cur_pid && ($prefix_vars{'t_loglevel'} =~ /^(LOG|ERROR|FATAL|PANIC|WARNING)$/)) { &store_queries($cur_pid); delete $cur_info{$cur_pid} if (!$log_duration || (($cur_info{$cur_pid}{duration} ne '') && ($cur_info{$cur_pid}{query} ne ''))); } # Process the log line &parse_query($fmt); } $cur_pid = $prefix_vars{'t_pid'}; } elsif ($goon && ($line =~ $other_syslog_line)) { $cur_pid = $8; my $t_query = $10; $t_query =~ s/#011/\t/g; next if ($t_query eq "\t"); # Some log line may be written by applications next if ($t_query =~ /\bLOG: /); # Parse orphan lines to append inforamtion to the right place &parse_orphan_line($cur_pid, $t_query); # Collect orphaned lines of multiline queries } elsif ($cur_pid) { # Some log line may be written by applications next if ($line 
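# Worked example (illustrative, not part of the original source): syslog lines
# carry no year, so the block above assumes the current year and rolls it back
# by one when that would put the entry in the future. Parsing
# "Dec 31 23:59:59 ..." on 2017-01-02 first gives 20171231, which is greater
# than the current date 20170102, so the year becomes 2016. The rule in
# isolation:
sub example_syslog_year
{
	my ($current_yyyymmdd, $month, $day) = @_;    # e.g. ('20170102', '12', '31')
	my $year = substr($current_yyyymmdd, 0, 4);
	$year = $year - 1 if ("$year$month$day" > $current_yyyymmdd);
	return $year;    # 2016 in the example above
}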
=~ /\bLOG: /); # Parse orphan lines to append inforamtion to the right place &parse_orphan_line($cur_pid, $line); } else { &logmsg('DEBUG', "Unknown syslog line format: $line"); } } elsif ($fmt eq 'stderr') { @matches = ($line =~ $compiled_prefix); if ($#matches >= 0) { # Store auto explain plan when switching to an other log entry foreach my $p (keys %cur_plan_info) { if (exists $cur_plan_info{$p}{plan}) { # Extract the query part from the plan my $key = 'query'; my @plan = split("\n", $cur_plan_info{$p}{plan}); foreach my $l (@plan) { $key = 'plan' if ($l =~ /\(cost=\d+.*rows=\d+/); $cur_info{$p}{$key} .= "$l\n"; } $cur_info{$p}{query} =~ s/^\s*Query Text:\s+//s; delete $cur_plan_info{$p}; &store_queries($p); delete $cur_info{$p}; } } for (my $i = 0 ; $i <= $#prefix_params ; $i++) { $prefix_vars{$prefix_params[$i]} = $matches[$i]; } $prefix_vars{'t_pid'} = $prefix_vars{'t_session_id'} if ($use_sessionid_as_pid); # Skip location information next if ($prefix_vars{'t_loglevel'} eq 'LOCATION'); if (!$prefix_vars{'t_timestamp'} && $prefix_vars{'t_mtimestamp'}) { $prefix_vars{'t_timestamp'} = $prefix_vars{'t_mtimestamp'}; } elsif (!$prefix_vars{'t_timestamp'} && $prefix_vars{'t_session_timestamp'}) { $prefix_vars{'t_timestamp'} = $prefix_vars{'t_session_timestamp'}; } elsif (!$prefix_vars{'t_timestamp'} && $prefix_vars{'t_epoch'}) { $prefix_vars{'t_timestamp'} = strftime("%Y-%m-%d %H:%M:%S", CORE::localtime($prefix_vars{'t_epoch'})); if ($prefix_vars{'t_epoch'} =~ /^\d{10}(\.\d{3})$/) { $prefix_vars{'t_timestamp'} .= $1; } } elsif ($prefix_vars{'t_timestamp'} =~ /^\d{10}(\.\d{3})$/) { my $ms = $1; $prefix_vars{'t_epoch'} = $prefix_vars{'t_timestamp'}; $prefix_vars{'t_timestamp'} = strftime("%Y-%m-%d %H:%M:%S", CORE::localtime($prefix_vars{'t_timestamp'})); $prefix_vars{'t_timestamp'} .= $ms; } ($prefix_vars{'t_year'}, $prefix_vars{'t_month'}, $prefix_vars{'t_day'}, $prefix_vars{'t_hour'}, $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}) = ($prefix_vars{'t_timestamp'} =~ $time_pattern); if ($prefix_vars{'t_hostport'} && !$prefix_vars{'t_client'}) { $prefix_vars{'t_client'} = $prefix_vars{'t_hostport'}; # Remove the port part $prefix_vars{'t_client'} =~ s/\(.*//; } # Skip unwanted lines my $res = &skip_unwanted_line(); next if ($res == 1); if ($res == -1) { &update_progress_bar($tmpoutfile, $nlines, $stop_offset, $totalsize, \$cursize, \$old_queries_count, \$old_errors_count); $getout = 2; last; } # Jump to the last line parsed if required next if (!&check_incremental_position($fmt, $prefix_vars{'t_timestamp'}, $line)); $prefix_vars{'t_client'} = _gethostbyaddr($prefix_vars{'t_client'}) if ($dns_resolv); # Store the current timestamp of the log line &store_current_timestamp($prefix_vars{'t_timestamp'}); # Extract information from log line prefix if (!$log_line_prefix) { &parse_log_prefix($prefix_vars{'t_logprefix'}); } # Check if the log line should be excluded from the report if (&validate_log_line($prefix_vars{'t_pid'})) { # this unused variable is used to store format information # when log format is not syslog $prefix_vars{'t_host'} = 'stderr'; # The information can be saved when we are switching to a new main message if ($cur_pid && $prefix_vars{'t_loglevel'} =~ /^(LOG|ERROR|FATAL|PANIC|WARNING)$/) { &store_queries($cur_pid); delete $cur_info{$cur_pid} if (!$log_duration || (($cur_info{$cur_pid}{duration} ne '') && ($cur_info{$cur_pid}{query} ne ''))); } # Process the log line &parse_query($fmt); } $cur_pid = $prefix_vars{'t_pid'}; # Collect additional query information } elsif ($cur_pid) { # 
Some log line may be written by applications
                    next if ($line =~ /\bLOG: /);

                    # Parse orphan lines to append information to the right place
                    &parse_orphan_line($cur_pid, $line);

                } else {
                    # unknown format
                    &logmsg('DEBUG', "Unknown line format: $line");
                }
            }
            last if (($stop_offset > 0) && ($current_offset >= $stop_offset));
        }

        if ($last_parsed) {
            $last_line{current_pos} = $current_offset;
        }
    }
    close $lfile;

    # Inform the parent that it should stop parsing other files
    if ($terminate) {
        kill('USR2', $parent_pid);
        return $terminate;
    }

    # Get stats from all pending temporary storage
    foreach my $pid (sort {$cur_info{$a}{date} <=> $cur_info{$b}{date}} keys %cur_info) {
        # Stores last query information
        &store_queries($pid, 1);
    }

    # Stores last temporary files information
    foreach my $pid (keys %cur_temp_info) {
        &store_temporary_and_lock_infos($pid);
    }

    # Stores last cancelled queries information
    foreach my $pid (keys %cur_cancel_info) {
        &store_temporary_and_lock_infos($pid);
    }

    # Stores last lock information
    foreach my $pid (keys %cur_lock_info) {
        &store_temporary_and_lock_infos($pid);
    }

    if ($extension eq 'tsung') {
        foreach my $pid (sort {$a <=> $b} keys %tsung_session) {
            &store_tsung_session($pid);
        }
    }

    if ($progress && ($getout != 1)) {
        if (!$tmpoutfile) {
            if ($totalsize) {
                print STDERR &progress_bar($cursize, $stop_offset || $totalsize, 25, '=',$overall_stat{'queries_number'},($overall_stat{'errors_number'}+$pgb_overall_stat{'errors_number'}), $logfile);
                print STDERR "\n";
            }
        } else {
            $pipe->print("$cursize " . ($overall_stat{'queries_number'} - $old_queries_count) . " " . (($overall_stat{'errors_number'}+$pgb_overall_stat{'errors_number'}) - $old_errors_count) . "\n");
        }
    }
    %cur_info = ();

    # In incremental mode data are saved to disk per day
    if ($incremental && ($last_line{datetime} || (($fmt eq 'pgbouncer') && $pgb_last_line{datetime}))) {

        $incr_date = ($fmt eq 'pgbouncer') ?
$pgb_last_line{datetime} : $last_line{datetime}; $incr_date =~ s/\s.*$//; # set path and create subdirectories if ($incr_date =~ /^(\d+)-(\d+)-(\d+)/) { mkdir("$outdir/$1") if (!-d "$outdir/$1"); mkdir("$outdir/$1/$2") if (!-d "$outdir/$1/$2"); mkdir("$outdir/$1/$2/$3") if (!-d "$outdir/$1/$2/$3"); } else { &logmsg('ERROR', "invalid incremental date: $incr_date, can not create subdirectories."); } my $bpath = $incr_date; $bpath =~ s/\-/\//g; # Mark the directory as needing index update if (open(OUT, ">>$last_parsed.tmp")) { flock(OUT, 2) || return $getout; print OUT "$incr_date\n"; close(OUT); } else { &logmsg('ERROR', "can't save last parsed line into $last_parsed.tmp, $!"); } # Save binary data my $filenum = $$; $filenum++ while (-e "$outdir/$bpath/$incr_date-$filenum.bin"); my $fhb = new IO::File ">$outdir/$bpath/$incr_date-$filenum.bin"; if (not defined $fhb) { localdie("FATAL: can't write to $outdir/$bpath/$incr_date-$filenum.bin, $!\n"); } &dump_as_binary($fhb); $fhb->close; &init_stats_vars(); } elsif ($tmpoutfile) { &dump_as_binary($tmpoutfile); $tmpoutfile->close(); } # Save last line into temporary file if ($last_parsed && (scalar keys %last_line || scalar keys %pgb_last_line)) { if (open(OUT, ">>$tmp_last_parsed")) { flock(OUT, 2) || return $getout; if ($fmt eq 'pgbouncer') { $pgb_last_line{current_pos} ||= 0; &logmsg('DEBUG', "Saving pgbouncer last parsed line into $tmp_last_parsed ($pgb_last_line{datetime}\t$pgb_last_line{current_pos})"); print OUT "pgbouncer\t$pgb_last_line{datetime}\t$pgb_last_line{current_pos}\t$pgb_last_line{orig}\n"; } else { $last_line{current_pos} ||= 0; &logmsg('DEBUG', "Saving last parsed line into $tmp_last_parsed ($last_line{datetime}\t$last_line{current_pos})"); print OUT "$last_line{datetime}\t$last_line{current_pos}\t$last_line{orig}\n"; } close(OUT); } else { &logmsg('ERROR', "can't save last parsed line into $tmp_last_parsed, $!"); } } # Inform the parent that it should stop parsing other files if ($getout) { kill('USR2', $parent_pid); } return $getout; } sub parse_orphan_line { my ($cur_pid, $line) = @_; # Store vacuum related information if ($cur_info{$cur_pid}{vacuum} && ($line =~ /^\t?(pages|tuples|buffer usage|avg read rate|system usage):/)) { if ($line =~ /^\t?(pages|tuples): (\d+) removed, (\d+) remain/) { $autovacuum_info{tables}{$cur_info{$cur_pid}{vacuum}}{$1}{removed} += $2; } if ($line =~ m#^\t?system usage: CPU .* sec elapsed (.*) sec#) { if ($1 > $autovacuum_info{peak}{system_usage}{elapsed}) { $autovacuum_info{peak}{system_usage}{elapsed} = $1; $autovacuum_info{peak}{system_usage}{table} = $cur_info{$cur_pid}{vacuum}; $autovacuum_info{peak}{system_usage}{date} = "$cur_info{$cur_pid}{year}-$cur_info{$cur_pid}{month}-$cur_info{$cur_pid}{day} " . "$cur_info{$cur_pid}{hour}:$cur_info{$cur_pid}{min}:$cur_info{$cur_pid}{sec}"; } } # stores bind parameters if parameter syntax is detected } elsif ( $cur_info{$cur_pid}{parameters} && (($line =~ /[,\s]*\$(\d+)\s=\s/) || ($line =~ /^'[^']*'$/)) ) { $cur_info{$cur_pid}{parameters} .= " $line" if (!$error_only); } elsif (exists $cur_plan_info{$cur_pid}) { $cur_plan_info{$cur_pid}{plan} .= "\n" . $line; # If we have previously stored a temporary file query, append to that query } elsif (exists $cur_temp_info{$cur_pid}{size}) { $cur_temp_info{$cur_pid}{query} .= "\n" . $line; # If we have previously stored a query that generates locks, append to that query } elsif (exists $cur_lock_info{$cur_pid}{query}) { $cur_lock_info{$cur_pid}{query} .= "\n" . 
$line; # If we have previously stored a cancelled query, append to that query } elsif (exists $cur_cancel_info{$cur_pid}{query}) { $cur_cancel_info{$cur_pid}{query} .= "\n" . $line; # Otherwise append the orphan line to the corresponding part of the query } else { # Append to the error statement if one is defined if (exists $cur_info{$cur_pid}{statement}) { $cur_info{$cur_pid}{statement} .= "\n" . $line if (!$nomultiline); # Append to the error context if one is defined } elsif (exists $cur_info{$cur_pid}{context}) { $cur_info{$cur_pid}{context} .= "\n" . $line; # Append to the query detail if one is defined } elsif (exists $cur_info{$cur_pid}{detail}) { $cur_info{$cur_pid}{detail} .= "\n" . $line; # After all append to the query if one is defined } elsif (exists $cur_info{$cur_pid}{query}) { $cur_info{$cur_pid}{query} .= "\n" . $line if (!$nomultiline && !$error_only); } } } # Store the current timestamp of the log line sub store_current_timestamp { my $t_timestamp = shift; $prefix_vars{'t_date'} = $t_timestamp; $prefix_vars{'t_date'} =~ s/\D+//g; if (!$overall_stat{'first_log_ts'} || ($overall_stat{'first_log_ts'} gt $t_timestamp)) { $overall_stat{'first_log_ts'} = $t_timestamp; } if (!$overall_stat{'last_log_ts'} || ($overall_stat{'last_log_ts'} lt $t_timestamp)) { $overall_stat{'last_log_ts'} = $t_timestamp; } } # Method used to check if the log file is produced by pgbouncer sub detect_pgbouncer_log { my ($file, $saved_date, $look_at_beginning) = @_; my ($lfile, $totalsize, $iscompressed) = &get_log_file($file); # Compressed files do not allow seeking if ($iscompressed) { $look_at_beginning = 1; } my ($gsec, $gmin, $ghour, $gmday, $gmon, $gyear, $gwday, $gyday, $gisdst) = localtime(time); $gyear += 1900; my $CURRENT_DATE = $gyear . sprintf("%02d", $gmon + 1) . sprintf("%02d", $gmday); %prefix_vars = (); my $startoffset = 0; # If seeking is not explicitely disabled if (!$look_at_beginning) { # do not seek if filesize is smaller than the seek position if ($saved_last_line{current_pos} < $totalsize) { $lfile->seek($saved_last_line{current_pos} || 0, 0); $startoffset = $saved_last_line{current_pos} || 0; } } my $more_lines = 0; my $ispgbouncerlog = 0; while (my $line = <$lfile>) { $more_lines++; $line =~ s/\r//; my @matches = ($line =~ $pgbouncer_log_format); if ($#matches >= 0) { $ispgbouncerlog++; for (my $i = 0 ; $i <= $#pgb_prefix_params ; $i++) { $prefix_vars{$pgb_prefix_params[$i]} = $matches[$i]; } } else { @matches = ($line =~ $pgbouncer_log_parse1); if (($#matches >= 0) && ($matches[-1] =~ /^Stats:/) ) { $ispgbouncerlog++; for (my $i = 0 ; $i <= $#pgb_prefix_parse1 ; $i++) { $prefix_vars{$pgb_prefix_params[$i]} = $matches[$i]; } } } next if (!$prefix_vars{'t_timestamp'}); if ($iscompressed) { close($lfile); return ($ispgbouncerlog, 0, "log file is compressed start at offset 0"); # This file has already been parsed } elsif ($saved_date gt $prefix_vars{'t_timestamp'}) { close($lfile); return ($ispgbouncerlog, 0, "timestamp $prefix_vars{'t_timestamp'} read at offset $startoffset is lower than saved timestamp: $saved_date"); } else { last; } } close($lfile); if (!$more_lines) { close($lfile); return ($ispgbouncerlog, 0, "there no new lines in this file"); } return ($ispgbouncerlog, 1, "reach the end of detect_pgbouncer_log() with start date: $saved_date and file size: $totalsize") ; } # Method used to check if the file stores logs after the last incremental position or not # This position should have been saved in the incremental file and read in the $last_parsed at # start up. 
Here we just verify that the first date in the file is before the last incremental date.
sub check_file_changed
{
    my ($file, $fmt, $saved_date, $saved_pos, $look_at_beginning) = @_;

    my ($lfile, $totalsize, $iscompressed) = &get_log_file($file);

    # Compressed files do not allow seeking
    if ($iscompressed) {
        close($lfile);
        return (1, "log file is compressed");
    }

    my ($gsec, $gmin, $ghour, $gmday, $gmon, $gyear, $gwday, $gyday, $gisdst) = localtime(time);
    $gyear += 1900;
    my $CURRENT_DATE = $gyear . sprintf("%02d", $gmon + 1) . sprintf("%02d", $gmday);

    %prefix_vars = ();
    my $startoffset = 0;

    # If seeking is not explicitly disabled
    if (!$look_at_beginning) {
        # do not seek if filesize is smaller than the seek position
        if ($saved_pos < $totalsize) {
            $lfile->seek($saved_pos || 0, 0);
            $startoffset = $saved_pos || 0;
        }
    }

    my $more_lines = 0;
    while (my $line = <$lfile>) {
        $more_lines++;
        $line =~ s/\r//;
        if ($fmt =~ /syslog/) {
            my @matches = ($line =~ $compiled_prefix);
            if ($#matches >= 0) {
                for (my $i = 0 ; $i <= $#prefix_params ; $i++) {
                    $prefix_vars{$prefix_params[$i]} = $matches[$i];
                }

                # Standard syslog format does not have year information, months are
                # three letters and days are not always with 2 digits.
                if ($prefix_vars{'t_month'} !~ /\d/) {
                    $prefix_vars{'t_year'} = $gyear;
                    $prefix_vars{'t_day'} = sprintf("%02d", $prefix_vars{'t_day'});
                    $prefix_vars{'t_month'} = $month_abbr{$prefix_vars{'t_month'}};

                    # Take care of year overlapping
                    if ("$prefix_vars{'t_year'}$prefix_vars{'t_month'}$prefix_vars{'t_day'}" > $CURRENT_DATE) {
                        $prefix_vars{'t_year'} = substr($CURRENT_DATE, 0, 4) - 1;
                    }
                }
                $prefix_vars{'t_timestamp'} = "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} $prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}";
            }
        } elsif ($fmt eq 'stderr') {
            my @matches = ($line =~ $compiled_prefix);
            if ($#matches >= 0) {
                for (my $i = 0 ; $i <= $#prefix_params ; $i++) {
                    $prefix_vars{$prefix_params[$i]} = $matches[$i];
                }
                if (!$prefix_vars{'t_timestamp'} && $prefix_vars{'t_mtimestamp'}) {
                    $prefix_vars{'t_timestamp'} = $prefix_vars{'t_mtimestamp'};
                } elsif (!$prefix_vars{'t_timestamp'} && $prefix_vars{'t_session_timestamp'}) {
                    $prefix_vars{'t_timestamp'} = $prefix_vars{'t_session_timestamp'};
                } elsif (!$prefix_vars{'t_timestamp'} && $prefix_vars{'t_epoch'}) {
                    $prefix_vars{'t_timestamp'} = strftime("%Y-%m-%d %H:%M:%S", CORE::localtime($prefix_vars{'t_epoch'}));
                    if ($prefix_vars{'t_epoch'} =~ /^\d{10}(\.\d{3})$/) {
                        $prefix_vars{'t_timestamp'} .= $1;
                    }
                }
            }
        } elsif ($fmt eq 'pgbouncer') {
            my @matches = ($line =~ $pgbouncer_log_parse1);
            if ($#matches >= 0) {
                for (my $i = 0 ; $i <= $#pgb_prefix_parse1 ; $i++) {
                    $prefix_vars{$pgb_prefix_parse1[$i]} = $matches[$i];
                }
            }
        }

        # Unwanted line
        next if (!$prefix_vars{'t_timestamp'});

        # This file has already been parsed
        if ($saved_date gt $prefix_vars{'t_timestamp'}) {
            close($lfile);
            return (0, "timestamp $prefix_vars{'t_timestamp'} read at offset $startoffset is lower than saved timestamp: $saved_date");
        } else {
            last;
        }
    }
    close($lfile);

    if (!$more_lines) {
        close($lfile);
        return (0, "there are no new lines in this file");
    }

    return (1, "reached the end of check_file_changed() with start date: $saved_date and file size: $totalsize");
}

# Method used to check if we have already reached the last parsing position in incremental mode
# This position should have been saved in the incremental file and read in the $last_parsed at
# start up.
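#
# Illustration only (a hedged sketch based on the surrounding code, not on any
# external documentation): the bookkeeping records written above into the
# $last_parsed / $tmp_last_parsed file are tab-separated, so a saved line is
# expected to look roughly like
#
#   <datetime>\t<byte offset>\t<original log line>
#   pgbouncer\t<datetime>\t<byte offset>\t<original log line>
#
# and could be split back, for example, with:
#
#   my ($saved_datetime, $saved_pos, $saved_orig) = split(/\t/, $record, 3);
#
# where $record is a hypothetical variable holding one such saved line.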
sub check_incremental_position { my ($fmt, $cur_date, $line) = @_; if ($last_parsed && ($fmt ne 'pgbouncer')) { if ($saved_last_line{datetime}) { if ($cur_date lt $saved_last_line{datetime}) { return 0; } elsif (!$last_line{datetime} && ($cur_date eq $saved_last_line{datetime})) { return 0 if ($line ne $saved_last_line{orig}); } } $last_line{datetime} = $cur_date; $last_line{orig} = $line; } elsif ($last_parsed) { if ($pgb_saved_last_line{datetime}) { if ($cur_date lt $pgb_saved_last_line{datetime}) { return 0; } elsif (!$pgb_last_line{datetime} && ($cur_date eq $pgb_saved_last_line{datetime})) { return 0 if ($line ne $pgb_saved_last_line{orig}); } } $pgb_last_line{datetime} = $cur_date; $pgb_last_line{orig} = $line; } # In incremental mode data are saved to disk per day if ($incremental) { $cur_date =~ s/\s.*$//; # Check if the current day has changed, if so save data $incr_date = $cur_date if (!$incr_date); if ($cur_date gt $incr_date) { # Get stats from all pending temporary storage foreach my $pid (sort {$cur_info{$a}{date} <=> $cur_info{$b}{date}} keys %cur_info) { # Stores last queries information &store_queries($pid, 1); } # Stores last temporary files and lock information foreach my $pid (keys %cur_temp_info) { &store_temporary_and_lock_infos($pid); } # Stores last cancelled queries information foreach my $pid (keys %cur_cancel_info) { &store_temporary_and_lock_infos($pid); } # Stores last temporary files and lock information foreach my $pid (keys %cur_lock_info) { &store_temporary_and_lock_infos($pid); } # Stores tsung sessions if ($extension eq 'tsung') { foreach my $pid (sort {$a <=> $b} keys %tsung_session) { &store_tsung_session($pid); } } # set path and create subdirectories if ($incr_date =~ /^(\d+)-(\d+)-(\d+)/) { mkdir("$outdir/$1") if (!-d "$outdir/$1"); mkdir("$outdir/$1/$2") if (!-d "$outdir/$1/$2"); mkdir("$outdir/$1/$2/$3") if (!-d "$outdir/$1/$2/$3"); } else { &logmsg('ERROR', "invalid incremental date: $incr_date, can not create subdirectories."); } my $bpath = $incr_date; $bpath =~ s/\-/\//g; # Mark this directory as needing a reindex if (open(OUT, ">>$last_parsed.tmp")) { flock(OUT, 2) || return 1; print OUT "$incr_date\n"; close(OUT); } else { &logmsg('ERROR', "can't save last parsed line into $last_parsed.tmp, $!"); } # Save binary data my $filenum = $$; $filenum++ while (-e "$outdir/$bpath/$incr_date-$filenum.bin"); my $fhb = new IO::File ">$outdir/$bpath/$incr_date-$filenum.bin"; if (not defined $fhb) { localdie("FATAL: can't write to $outdir/$bpath/$incr_date-$filenum.bin, $!\n"); } &dump_as_binary($fhb); $fhb->close; $incr_date = $cur_date; &init_stats_vars(); } } return 1; } # Display message following the log level sub logmsg { my ($level, $str) = @_; return if ($quiet && ($level ne 'FATAL')); return if (!$debug && ($level eq 'DEBUG')); if ($level =~ /(\d+)/) { print STDERR "\t" x $1; } print STDERR "$level: $str\n"; } # Remove quote from alias for normalisation sub remove_alias { my $str = shift(); $str =~ s/'//gs; return $str; } # Normalize SQL queries by removing parameters sub normalize_query { my $orig_query = shift; return if (!$orig_query); # Remove comments $orig_query =~ s/\/\*(.*?)\*\///gs; # Set the entire query lowercase $orig_query = lc($orig_query); # Remove extra space, new line and tab characters by a single space $orig_query =~ s/\s+/ /gs; # Removed start of transaction if ($orig_query !~ /^\s*begin\s*;\s*$/) { $orig_query =~ s/^\s*begin\s*;\s*//gs } # Normalise alias with quote $orig_query =~ s/AS\s+"([^"]+)"/'AS "' . remove_alias($1) . 
'"'/eigs; # Remove string content $orig_query =~ s/\\'//gs; $orig_query =~ s/'[^']*'/''/gs; $orig_query =~ s/''('')+/''/gs; # Remove NULL parameters $orig_query =~ s/=\s*NULL/=''/gs; # Remove numbers $orig_query =~ s/([^a-z0-9_\$\-])-?\d+/${1}0/gs; # Remove hexadecimal numbers $orig_query =~ s/([^a-z_\$-])0x[0-9a-f]{1,10}/${1}0x/gs; # Remove bind parameters $orig_query =~ s/\$\d+/\?/gs; # Remove IN values $orig_query =~ s/\bin\s*\([\'0x,\s\?]*\)/in (...)/gs; # Remove curor names in CURSOR and IN clauses $orig_query =~ s/\b(declare|in)\s+"[^"]+"/$1 "..."/gs; return $orig_query; } sub generate_anonymized_string { my ($original, $cache, $before) = @_; # Prevent dates from being anonymized return $original if $original =~ m{\A\d\d\d\d[/:-]\d\d[/:-]\d\d\z}; return $original if $original =~ m{\A\d\d[/:-]\d\d[/:-]\d\d\d\d\z}; # Prevent dates format like DD/MM/YYYY HH24:MI:SS from being anonymized return $original if $original =~ m{\A(?:FM|FX|TM)?(?:HH|HH12|HH24|MI|SS|MS|US|SSSS|AM|A\.M\.|PM|P\.M\.|am|a\.m\.|pm|p\.m\.|Y,YYY|YYYY|YYY|YY|Y|IYYY|IYY|IY|I|BC|B\.C\.|AD|A\.D\.|bc|b\.c\.|ad|a\.d\.|MONTH|Month|month|MON|Mon|mon|MM|DAY|Day|day|DY|Dy|dy|DDD|DD|D|W|WW|IW|CC|J|Q|RM|rm|TZ|tz|[\s\/\-:])+(?:TH|th|SP)?$}; # Prevent interval from being anonymized return $original if $before =~ /interval/i; # Range of characters to use in anonymized strings my @chars = ('A'..'Z', 0..9, 'a'..'z', '-', '_', '.'); unless ($cache->{$original}) { # Actual anonymized version generation $cache->{$original} = join('', map { $chars[rand @chars] } 1..10 ); } return $cache->{$original}; } # Anonymize litteral in SQL queries by replacing parameters with fake values sub anonymize_query { my $orig_query = shift; return if (!$orig_query); # Variable to hold anonymized versions, so we can provide the same value # for the same input, within single query. my $anonymization_cache = {}; # Remove comments $orig_query =~ s/\/\*(.*?)\*\///gs; # Clean query $orig_query =~ s/\\'//g; $orig_query =~ s/('')+//g; # Anonymize each values $orig_query =~ s/([^\s]+[\s\(]*)'([^']*)'/"$1'".generate_anonymized_string($2, $anonymization_cache, $1)."'"/eg; return $orig_query; } # Format numbers with comma for better reading sub comma_numbers { return 0 if ($#_ < 0); return 0 if (!$_[0]); my $text = reverse $_[0]; $text =~ s/(\d\d\d)(?=\d)(?!\d*\.)/$1$num_sep/g; return scalar reverse $text; } # Format numbers with comma for better reading sub pretty_print_size { my $val = shift; return 0 if (!$val); if ($val >= 1125899906842624) { $val = ($val / 1125899906842624); $val = sprintf("%0.2f", $val) . " PiB"; } elsif ($val >= 1099511627776) { $val = ($val / 1099511627776); $val = sprintf("%0.2f", $val) . " TiB"; } elsif ($val >= 1073741824) { $val = ($val / 1073741824); $val = sprintf("%0.2f", $val) . " GiB"; } elsif ($val >= 1048576) { $val = ($val / 1048576); $val = sprintf("%0.2f", $val) . " MiB"; } elsif ($val >= 1024) { $val = ($val / 1024); $val = sprintf("%0.2f", $val) . " KiB"; } else { $val = $val . " B"; } return $val; } # Format duration sub convert_time { my $time = shift; return '0ms' if (!$time); my $days = int($time / 86400000); $time -= ($days * 86400000); my $hours = int($time / 3600000); $time -= ($hours * 3600000); my $minutes = int($time / 60000); $time -= ($minutes * 60000); my $seconds = int($time / 1000); $time -= ($seconds * 1000); my $milliseconds = sprintf("%.3d", $time); $days = $days < 1 ? '' : $days . 'd'; $hours = $hours < 1 ? '' : $hours . 'h'; $minutes = $minutes < 1 ? '' : $minutes . 'm'; $seconds = $seconds < 1 ? 
'' : $seconds . 's'; $milliseconds = $milliseconds < 1 ? '' : $milliseconds . 'ms'; if ($days || $hours || $minutes) { $milliseconds = ''; } elsif ($seconds) { $milliseconds =~ s/\.\d+//; } $milliseconds =~ s/^[0]+// if ($milliseconds !~ /\./); $time = $days . $hours . $minutes . $seconds . $milliseconds; $time = '0ms' if ($time eq ''); return $time; } # Stores the top N queries generating the biggest temporary file sub set_top_tempfile_info { my ($q, $sz, $date, $db, $user, $remote, $app) = @_; push(@top_tempfile_info, [($sz, $date, $q, $db, $user, $remote, $app)]); my @tmp_top_tempfile_info = sort {$b->[0] <=> $a->[0]} @top_tempfile_info; @top_tempfile_info = (); for (my $i = 0; $i <= $#tmp_top_tempfile_info; $i++) { push(@top_tempfile_info, $tmp_top_tempfile_info[$i]); last if ($i == $end_top); } } # Stores the top N queries cancelled sub set_top_cancelled_info { my ($q, $sz, $date, $db, $user, $remote, $app) = @_; push(@top_cancelled_info, [($sz, $date, $q, $db, $user, $remote, $app)]); my @tmp_top_cancelled_info = sort {$b->[0] <=> $a->[0]} @top_cancelled_info; @top_cancelled_info = (); for (my $i = 0; $i <= $#tmp_top_cancelled_info; $i++) { push(@top_cancelled_info, $tmp_top_cancelled_info[$i]); last if ($i == $end_top); } } # Stores the top N queries waiting the most sub set_top_locked_info { my ($q, $dt, $date, $db, $user, $remote, $app) = @_; push(@top_locked_info, [($dt, $date, $q, $db, $user, $remote, $app)]); my @tmp_top_locked_info = sort {$b->[0] <=> $a->[0]} @top_locked_info; @top_locked_info = (); for (my $i = 0; $i <= $#tmp_top_locked_info; $i++) { push(@top_locked_info, $tmp_top_locked_info[$i]); last if ($i == $end_top); } } # Stores the top N slowest queries sub set_top_slowest { my ($q, $dt, $date, $db, $user, $remote, $app, $bind, $plan) = @_; push(@top_slowest, [($dt, $date, $q, $db, $user, $remote, $app, $bind, $plan)]); my @tmp_top_slowest = sort {$b->[0] <=> $a->[0]} @top_slowest; @top_slowest = (); for (my $i = 0; $i <= $#tmp_top_slowest; $i++) { push(@top_slowest, $tmp_top_slowest[$i]); last if ($i == $end_top); } } # Stores top N slowest sample queries sub set_top_sample { my ($norm, $q, $dt, $date, $db, $user, $remote, $app, $bind, $plan) = @_; return if (!$norm || !$q); $normalyzed_info{$norm}{samples}{$dt}{query} = $q; $normalyzed_info{$norm}{samples}{$dt}{date} = $date; $normalyzed_info{$norm}{samples}{$dt}{db} = $db; $normalyzed_info{$norm}{samples}{$dt}{user} = $user; $normalyzed_info{$norm}{samples}{$dt}{remote} = $remote; $normalyzed_info{$norm}{samples}{$dt}{app} = $app; $normalyzed_info{$norm}{samples}{$dt}{bind} = $bind; $normalyzed_info{$norm}{samples}{$dt}{plan} = $plan; if ($sample > 0) { my $i = 1; foreach my $k (sort {$b <=> $a} keys %{$normalyzed_info{$norm}{samples}}) { if ($i > $sample) { delete $normalyzed_info{$norm}{samples}{$k}; } $i++; } } } # Stores top N error sample queries sub set_top_error_sample { my ($q, $date, $real_error, $detail, $context, $statement, $hint, $db, $user, $app, $remote, $sqlstate) = @_; $errors_code{$sqlstate}++ if ($sqlstate); # Stop when we have our number of samples if (!exists $error_info{$q}{date} || ($#{$error_info{$q}{date}}+1 < $sample)) { if ( ($q =~ /deadlock detected/) || ($real_error && !grep(/^\Q$real_error\E$/, @{$error_info{$q}{error}})) ) { if ($anonymize) { $context = &anonymize_query($context); $statement = &anonymize_query($statement); $detail = &anonymize_query($detail); } push(@{$error_info{$q}{date}}, $date); push(@{$error_info{$q}{detail}}, $detail); push(@{$error_info{$q}{context}}, 
$context); push(@{$error_info{$q}{statement}}, $statement); push(@{$error_info{$q}{hint}}, $hint); push(@{$error_info{$q}{error}}, $real_error); push(@{$error_info{$q}{db}}, $db); push(@{$error_info{$q}{user}}, $user); push(@{$error_info{$q}{app}}, $app); push(@{$error_info{$q}{remote}}, $remote); push(@{$error_info{$q}{sqlstate}}, $sqlstate); } } } # Stores top N error sample from pgbouncer log sub pgb_set_top_error_sample { my ($q, $date, $real_error, $db, $user, $remote) = @_; # Stop when we have our number of samples if (!exists $pgb_error_info{$q}{date} || ($#{$pgb_error_info{$q}{date}} < $sample)) { push(@{$pgb_error_info{$q}{date}}, $date); push(@{$pgb_error_info{$q}{error}}, $real_error); push(@{$pgb_error_info{$q}{db}}, $db); push(@{$pgb_error_info{$q}{user}}, $user); push(@{$pgb_error_info{$q}{remote}}, $remote); } } sub dump_as_text { # Global information my $curdate = localtime(time); my $fmt_nlines = &comma_numbers($nlines); my $total_time = timestr($td); $total_time =~ s/^([\.0-9]+) wallclock.*/$1/; $total_time = &convert_time($total_time * 1000); my $logfile_str = $log_files[0]; if ($#log_files > 0) { $logfile_str .= ', ..., ' . $log_files[-1]; } print $fh qq{ pgBadger :: $report_title - Global information --------------------------------------------------- Generated on $curdate Log file: $logfile_str Parsed $fmt_nlines log entries in $total_time Log start from $overall_stat{'first_log_ts'} to $overall_stat{'last_log_ts'} }; # Dump normalized queries only if requested if ($dump_normalized_only) { print $fh "Count\t\tQuery\n"; print $fh '-'x70,"\n"; foreach my $q (sort { $normalyzed_info{$b}{count} <=> $normalyzed_info{$a}{count} } keys %normalyzed_info) { print $fh "$normalyzed_info{$q}{count} $q\n"; } print $fh "\n\n"; print $fh "Report generated by pgBadger $VERSION ($project_url).\n"; return; } # Overall statistics my $fmt_unique = &comma_numbers(scalar keys %normalyzed_info); my $fmt_queries = &comma_numbers($overall_stat{'queries_number'}); my $fmt_duration = &convert_time($overall_stat{'queries_duration'}); $overall_stat{'first_query_ts'} ||= '-'; $overall_stat{'last_query_ts'} ||= '-'; print $fh qq{ - Overall statistics --------------------------------------------------- Number of unique normalized queries: $fmt_unique Number of queries: $fmt_queries Total query duration: $fmt_duration First query: $overall_stat{'first_query_ts'} Last query: $overall_stat{'last_query_ts'} }; foreach (sort {$overall_stat{'peak'}{$b}{query} <=> $overall_stat{'peak'}{$a}{query}} keys %{$overall_stat{'peak'}}) { print $fh "Query peak: ", &comma_numbers($overall_stat{'peak'}{$_}{query}), " queries/s at $_"; last; } if (!$disable_error) { my $fmt_errors = &comma_numbers($overall_stat{'errors_number'}); my $fmt_unique_error = &comma_numbers(scalar keys %error_info); print $fh qq{ Number of events: $fmt_errors Number of unique normalized events: $fmt_unique_error }; } if ($tempfile_info{count}) { my $fmt_temp_maxsise = &pretty_print_size($tempfile_info{maxsize}); my $fmt_temp_avsize = &pretty_print_size(sprintf("%.2f", ($tempfile_info{size} / $tempfile_info{count}))); print $fh qq{Number temporary files: $tempfile_info{count} Max size of temporary files: $fmt_temp_maxsise Average size of temporary files: $fmt_temp_avsize }; } if ($cancelled_info{count}) { print $fh qq{Number cancelled queries: $cancelled_info{count} }; } if (!$disable_session && $session_info{count}) { my $avg_session_duration = &convert_time($session_info{duration} / $session_info{count}); my $tot_session_duration = 
&convert_time($session_info{duration}); my $avg_queries = &comma_numbers(int($overall_stat{'queries_number'}/$session_info{count})); my $avg_duration = &convert_time(int($overall_stat{'queries_duration'}/$session_info{count})); print $fh qq{Total number of sessions: $session_info{count} Total duration of sessions: $tot_session_duration Average duration of sessions: $avg_session_duration Average queries per sessions: $avg_queries Average queries duration per sessions: $avg_duration }; foreach (sort {$overall_stat{'peak'}{$b}{session} <=> $overall_stat{'peak'}{$a}{session}} keys %{$overall_stat{'peak'}}) { next if (!$session_info{count}); print $fh "Session peak: ", &comma_numbers($overall_stat{'peak'}{$_}{session}), " sessions at $_"; last; } } if (!$disable_connection && $connection_info{count}) { print $fh "Total number of connections: $connection_info{count}\n"; foreach (sort {$overall_stat{'peak'}{$b}{connection} <=> $overall_stat{'peak'}{$a}{connection}} keys %{$overall_stat{'peak'}}) { if ($overall_stat{'peak'}{$_}{connection} > 0) { print $fh "Connection peak: ", &comma_numbers($overall_stat{'peak'}{$_}{connection}), " conn/s at $_"; } last; } } if (scalar keys %database_info > 1) { print $fh "Total number of databases: ", scalar keys %database_info, "\n"; } if (!$disable_hourly && $overall_stat{'queries_number'}) { print $fh qq{ - Hourly statistics ---------------------------------------------------- Report not supported by text format }; } # INSERT/DELETE/UPDATE/SELECT repartition my $totala = 0; foreach my $a (@SQL_ACTION) { $totala += $overall_stat{$a}; } if (!$disable_type && $totala) { my $total = $overall_stat{'queries_number'} || 1; print $fh "\n- Queries by type ------------------------------------------------------\n\n"; print $fh "Type Count Percentage\n"; foreach my $a (@SQL_ACTION) { print $fh "$a: ", &comma_numbers($overall_stat{$a}), " ", sprintf("%0.2f", ($overall_stat{$a} * 100) / $total), "%\n"; } print $fh "OTHERS: ", &comma_numbers($total - $totala), " ", sprintf("%0.2f", (($total - $totala) * 100) / $total), "%\n" if (($total - $totala) > 0); print $fh "\n"; # Show request per database statistics if (scalar keys %database_info > 1) { print $fh "\n- Request per database ------------------------------------------------------\n\n"; print $fh "Database Request type Count Duration\n"; foreach my $d (sort keys %database_info) { print $fh "$d - ", &comma_numbers($database_info{$d}{count}), " ", &convert_time($database_info{$d}{duration}), "\n"; foreach my $r (sort keys %{$database_info{$d}}) { next if (($r eq 'count') || ($r =~ /duration/)); print $fh "\t$r ", &comma_numbers($database_info{$d}{$r}), " ", &convert_time($database_info{$d}{"$r|duration"}), "\n"; } } } # Show request per application statistics if (scalar keys %application_info > 1) { print $fh "\n- Request per application ------------------------------------------------------\n\n"; print $fh "Application Request type Count Duration\n"; foreach my $d (sort keys %application_info) { print $fh "$d - ", &comma_numbers($application_info{$d}{count}), " ", &convert_time($application_info{$d}{duration}), "\n"; foreach my $r (sort keys %{$application_info{$d}}) { next if (($r eq 'count') || ($r =~ /duration/)); print $fh "\t$r ", &comma_numbers($application_info{$d}{$r}), " ", &convert_time($application_info{$d}{"$r|duration"}), "\n"; } } } # Show request per user statistics if (scalar keys %user_info > 1) { print $fh "\n- Request per user ------------------------------------------------------\n\n"; print $fh "User 
Request type Count duration\n"; foreach my $d (sort keys %user_info) { print $fh "$d - ", &comma_numbers($user_info{$d}{count}), " ", &convert_time($user_info{$d}{duration}), "\n"; foreach my $r (sort keys %{$user_info{$d}}) { next if (($r eq 'count') || ($r =~ /duration/)); print $fh "\t$r ", &comma_numbers($user_info{$d}{$r}), " ", &convert_time($user_info{$d}{"$r|duration"}), "\n"; } } } # Show request per host statistics if (scalar keys %host_info > 1) { print $fh "\n- Request per host ------------------------------------------------------\n\n"; print $fh "Host Request type Count Duration\n"; foreach my $d (sort keys %host_info) { print $fh "$d - ", &comma_numbers($host_info{$d}{count}), " ", &convert_time($host_info{$d}{duration}), "\n"; foreach my $r (sort keys %{$host_info{$d}}) { next if (($r eq 'count') || ($r =~ /duration/)); print $fh "\t$r ", &comma_numbers($host_info{$d}{$r}), " ", &convert_time($host_info{$d}{"$r|duration"}), "\n"; } } } } if (!$disable_lock && scalar keys %lock_info > 0) { print $fh "\n- Locks by type ------------------------------------------------------\n\n"; print $fh "Type Object Count Total Duration Avg duration (s)\n"; my $total_count = 0; my $total_duration = 0; foreach my $t (sort keys %lock_info) { print $fh "$t\t\t", &comma_numbers($lock_info{$t}{count}), " ", &convert_time($lock_info{$t}{duration}), " ", &convert_time($lock_info{$t}{duration} / $lock_info{$t}{count}), "\n"; foreach my $o (sort keys %{$lock_info{$t}}) { next if (($o eq 'count') || ($o eq 'duration') || ($o eq 'chronos')); print $fh "\t$o\t", &comma_numbers($lock_info{$t}{$o}{count}), " ", &convert_time($lock_info{$t}{$o}{duration}), " ", &convert_time($lock_info{$t}{$o}{duration} / $lock_info{$t}{$o}{count}), "\n"; } $total_count += $lock_info{$t}{count}; $total_duration += $lock_info{$t}{duration}; } print $fh "Total:\t\t\t", &comma_numbers($total_count), " ", &convert_time($total_duration), " ", &convert_time($total_duration / ($total_count || 1)), "\n"; } # Show session per database statistics if (!$disable_session && exists $session_info{database}) { print $fh "\n- Sessions per database ------------------------------------------------------\n\n"; print $fh "Database Count Total Duration Avg duration (s)\n"; foreach my $d (sort keys %{$session_info{database}}) { print $fh "$d - ", &comma_numbers($session_info{database}{$d}{count}), " ", &convert_time($session_info{database}{$d}{duration}), " ", &convert_time($session_info{database}{$d}{duration} / $session_info{database}{$d}{count}), "\n"; } } # Show session per user statistics if (!$disable_session && exists $session_info{user}) { print $fh "\n- Sessions per user ------------------------------------------------------\n\n"; print $fh "User Count Total Duration Avg duration (s)\n"; foreach my $d (sort keys %{$session_info{user}}) { print $fh "$d - ", &comma_numbers($session_info{user}{$d}{count}), " ", &convert_time($session_info{user}{$d}{duration}), " ", &convert_time($session_info{user}{$d}{duration} / $session_info{user}{$d}{count}), "\n"; } } # Show session per host statistics if (!$disable_session && exists $session_info{host}) { print $fh "\n- Sessions per host ------------------------------------------------------\n\n"; print $fh "User Count Total Duration Avg duration (s)\n"; foreach my $d (sort keys %{$session_info{host}}) { print $fh "$d - ", &comma_numbers($session_info{host}{$d}{count}), " ", &convert_time($session_info{host}{$d}{duration}), " ", &convert_time($session_info{host}{$d}{duration} / 
$session_info{host}{$d}{count}), "\n"; } } # Show session per application statistics if (!$disable_session && exists $session_info{app}) { print $fh "\n- Sessions per application ------------------------------------------------------\n\n"; print $fh "Application Count Total Duration Avg duration (s)\n"; foreach my $d (sort keys %{$session_info{app}}) { print $fh "$d - ", &comma_numbers($session_info{app}{$d}{count}), " ", &convert_time($session_info{app}{$d}{duration}), " ", &convert_time($session_info{app}{$d}{duration} / $session_info{app}{$d}{count}), "\n"; } } # Show connection per database statistics if (!$disable_connection && exists $connection_info{database}) { print $fh "\n- Connections per database ------------------------------------------------------\n\n"; print $fh "Database User Count\n"; foreach my $d (sort keys %{$connection_info{database}}) { print $fh "$d - ", &comma_numbers($connection_info{database}{$d}), "\n"; foreach my $u (sort keys %{$connection_info{user}}) { next if (!exists $connection_info{database_user}{$d}{$u}); print $fh "\t$u ", &comma_numbers($connection_info{database_user}{$d}{$u}), "\n"; } } } # Show connection per user statistics if (!$disable_connection && exists $connection_info{user}) { print $fh "\n- Connections per user ------------------------------------------------------\n\n"; print $fh "User Count\n"; foreach my $d (sort keys %{$connection_info{user}}) { print $fh "$d - ", &comma_numbers($connection_info{user}{$d}), "\n"; } } # Show connection per host statistics if (!$disable_connection && exists $connection_info{host}) { print $fh "\n- Connections per host ------------------------------------------------------\n\n"; print $fh "User Count\n"; foreach my $d (sort keys %{$connection_info{host}}) { print $fh "$d - ", &comma_numbers($connection_info{host}{$d}), "\n"; } } # Show lock wait detailed information if (!$disable_lock && scalar keys %lock_info > 0) { my @top_locked_queries; foreach my $h (keys %normalyzed_info) { if (exists($normalyzed_info{$h}{locks})) { push (@top_locked_queries, [$h, $normalyzed_info{$h}{locks}{count}, $normalyzed_info{$h}{locks}{wait}, $normalyzed_info{$h}{locks}{minwait}, $normalyzed_info{$h}{locks}{maxwait}]); } } # Most frequent waiting queries (N) @top_locked_queries = sort {$b->[2] <=> $a->[2]} @top_locked_queries; print $fh "\n- Most frequent waiting queries (N) -----------------------------------------\n\n"; print $fh "Rank Count Total wait time (s) Min/Max/Avg duration (s) Query\n"; for (my $i = 0 ; $i <= $#top_locked_queries ; $i++) { last if ($i > $end_top); print $fh ($i + 1), ") ", $top_locked_queries[$i]->[1], " - ", &convert_time($top_locked_queries[$i]->[2]), " - ", &convert_time($top_locked_queries[$i]->[3]), "/", &convert_time($top_locked_queries[$i]->[4]), "/", &convert_time(($top_locked_queries[$i]->[2] / $top_locked_queries[$i]->[1])), " - ", $top_locked_queries[$i]->[0], "\n"; print $fh "--\n"; my $k = $top_locked_queries[$i]->[0]; my $j = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { last if ($j > $sample); my $ttl = $top_locked_info[$i]->[1] || ''; my $db = " - $normalyzed_info{$k}{samples}{$d}{date} - database: $normalyzed_info{$k}{samples}{$d}{db}" if ($normalyzed_info{$k}{samples}{$d}{db}); $db .= ", user: $normalyzed_info{$k}{samples}{$d}{user}" if ($normalyzed_info{$k}{samples}{$d}{user}); $db .= ", remote: $normalyzed_info{$k}{samples}{$d}{remote}" if ($normalyzed_info{$k}{samples}{$d}{remote}); $db .= ", app: $normalyzed_info{$k}{samples}{$d}{app}" if 
($normalyzed_info{$k}{samples}{$d}{app}); $db .= ", bind query: yes" if ($normalyzed_info{$k}{samples}{$d}{bind}); $db =~ s/^, / - /; print $fh "\t- Example $j: ", &convert_time($d), "$db - ", $normalyzed_info{$k}{samples}{$d}{query}, "\n"; $j++; } } print $fh "\n"; @top_locked_queries = (); # Queries that waited the most @top_locked_info = sort {$b->[1] <=> $a->[1]} @top_locked_info; print $fh "\n- Queries that waited the mosts ---------------------------------------------\n\n"; print $fh "Rank Wait time (s) Query\n"; for (my $i = 0 ; $i <= $#top_locked_info ; $i++) { my $ttl = $top_locked_info[$i]->[1] || ''; my $db = " - database: $top_locked_info[$i]->[3]" if ($top_locked_info[$i]->[3]); $db .= ", user: $top_locked_info[$i]->[4]" if ($top_locked_info[$i]->[4]); $db .= ", remote: $top_locked_info[$i]->[5]" if ($top_locked_info[$i]->[5]); $db .= ", app: $top_locked_info[$i]->[6]" if ($top_locked_info[$i]->[6]); $db =~ s/^, / - /; print $fh ($i + 1), ") ", &convert_time($top_locked_info[$i]->[0]), " $ttl$db - ", $top_locked_info[$i]->[2], "\n"; print $fh "--\n"; } print $fh "\n"; } # Show temporary files detailed information if (!$disable_temporary && scalar keys %tempfile_info > 0) { my @top_temporary; foreach my $h (keys %normalyzed_info) { if (exists($normalyzed_info{$h}{tempfiles})) { push (@top_temporary, [$h, $normalyzed_info{$h}{tempfiles}{count}, $normalyzed_info{$h}{tempfiles}{size}, $normalyzed_info{$h}{tempfiles}{minsize}, $normalyzed_info{$h}{tempfiles}{maxsize}]); } } # Queries generating the most temporary files (N) @top_temporary = sort {$b->[1] <=> $a->[1]} @top_temporary; print $fh "\n- Queries generating the most temporary files (N) ---------------------------\n\n"; print $fh "Rank Count Total size Min/Max/Avg size Query\n"; my $idx = 1; for (my $i = 0 ; $i <= $#top_temporary ; $i++) { last if ($i > $end_top); print $fh $idx, ") ", $top_temporary[$i]->[1], " - ", &comma_numbers($top_temporary[$i]->[2]), " - ", &comma_numbers($top_temporary[$i]->[3]), "/", &comma_numbers($top_temporary[$i]->[4]), "/", &comma_numbers(sprintf("%.2f", $top_temporary[$i]->[2] / $top_temporary[$i]->[1])), " - ", $top_temporary[$i]->[0], "\n"; print $fh "--\n"; my $k = $top_temporary[$i]->[0]; if (scalar keys %{$normalyzed_info{$k}{samples}}) { my $j = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { last if ($j > $sample); my $db = "$normalyzed_info{$k}{samples}{$d}{date} - database: $normalyzed_info{$k}{samples}{$d}{db}" if ($normalyzed_info{$k}{samples}{$d}{db}); $db .= ", user: $normalyzed_info{$k}{samples}{$d}{user}" if ($normalyzed_info{$k}{samples}{$d}{user}); $db .= ", remote: $normalyzed_info{$k}{samples}{$d}{remote}" if ($normalyzed_info{$k}{samples}{$d}{remote}); $db .= ", app: $normalyzed_info{$k}{samples}{$d}{app}" if ($normalyzed_info{$k}{samples}{$d}{app}); $db .= ", bind query: yes" if ($normalyzed_info{$k}{samples}{$d}{bind}); $db =~ s/^, / - /; print $fh "\t- Example $j: ", &convert_time($d), " - $db - ", $normalyzed_info{$k}{samples}{$d}{query}, "\n"; $j++; } } $idx++; } @top_temporary = (); # Top queries generating the largest temporary files @top_tempfile_info = sort {$b->[1] <=> $a->[1]} @top_tempfile_info; print $fh "\n- Queries generating the largest temporary files ----------------------------\n\n"; print $fh "Rank Size Query\n"; for (my $i = 0 ; $i <= $#top_tempfile_info ; $i++) { my $ttl = $top_tempfile_info[$i]->[1] || ''; my $db = " - database: $top_tempfile_info[$i]->[3]" if ($top_tempfile_info[$i]->[3]); $db .= ", user: 
$top_tempfile_info[$i]->[4]" if ($top_tempfile_info[$i]->[4]); $db .= ", remote: $top_tempfile_info[$i]->[5]" if ($top_tempfile_info[$i]->[5]); $db .= ", app: $top_tempfile_info[$i]->[6]" if ($top_tempfile_info[$i]->[6]); $db =~ s/^, / - /; print $fh ($i + 1), ") ", &comma_numbers($top_tempfile_info[$i]->[0]), " - $ttl$db - ", $top_tempfile_info[$i]->[2], "\n"; } print $fh "\n"; } # Show cancelled queries detailed information if (!$disable_query && scalar keys %cancelled_info > 0) { my @top_cancelled; foreach my $h (keys %normalyzed_info) { if (exists($normalyzed_info{$h}{cancelled})) { push (@top_cancelled, [$h, $normalyzed_info{$h}{cancelled}{count}]); } } # Queries generating the most cancelled files (N) @top_cancelled = sort {$b->[1] <=> $a->[1]} @top_cancelled; print $fh "\n- Queries most cancelled (N) ---------------------------\n\n"; print $fh "Rank Count Query\n"; my $idx = 1; for (my $i = 0 ; $i <= $#top_cancelled ; $i++) { last if ($i > $end_top); print $fh $idx, ") ", $top_cancelled[$i]->[1], " - ", $top_cancelled[$i]->[0], "\n"; print $fh "--\n"; my $k = $top_cancelled[$i]->[0]; if (scalar keys %{$normalyzed_info{$k}{samples}}) { my $j = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { last if ($j > $sample); my $db = "$normalyzed_info{$k}{samples}{$d}{date} - database: $normalyzed_info{$k}{samples}{$d}{db}" if ($normalyzed_info{$k}{samples}{$d}{db}); $db .= ", user: $normalyzed_info{$k}{samples}{$d}{user}" if ($normalyzed_info{$k}{samples}{$d}{user}); $db .= ", remote: $normalyzed_info{$k}{samples}{$d}{remote}" if ($normalyzed_info{$k}{samples}{$d}{remote}); $db .= ", app: $normalyzed_info{$k}{samples}{$d}{app}" if ($normalyzed_info{$k}{samples}{$d}{app}); $db .= ", bind query: yes" if ($normalyzed_info{$k}{samples}{$d}{bind}); $db =~ s/^, / - /; print $fh "\t- Example $j: ", &convert_time($d), " - $db - ", $normalyzed_info{$k}{samples}{$d}{query}, "\n"; $j++; } } $idx++; } @top_cancelled = (); # Top queries generating the largest cancelled files @top_cancelled_info = sort {$b->[1] <=> $a->[1]} @top_cancelled_info; print $fh "\n- Queries generating the most cancellation ----------------------------\n\n"; print $fh "Rank Times cancelled Query\n"; for (my $i = 0 ; $i <= $#top_cancelled_info ; $i++) { my $ttl = $top_cancelled_info[$i]->[1] || ''; my $db = " - database: $top_cancelled_info[$i]->[3]" if ($top_cancelled_info[$i]->[3]); $db .= ", user: $top_cancelled_info[$i]->[4]" if ($top_cancelled_info[$i]->[4]); $db .= ", remote: $top_cancelled_info[$i]->[5]" if ($top_cancelled_info[$i]->[5]); $db .= ", app: $top_cancelled_info[$i]->[6]" if ($top_cancelled_info[$i]->[6]); $db =~ s/^, / - /; print $fh ($i + 1), ") ", &comma_numbers($top_cancelled_info[$i]->[0]), " - $ttl$db - ", $top_cancelled_info[$i]->[2], "\n"; } print $fh "\n"; } # Show top information if (!$disable_query && ($#top_slowest >= 0)) { print $fh "\n- Slowest queries ------------------------------------------------------\n\n"; print $fh "Rank Duration (s) Query\n"; for (my $i = 0 ; $i <= $#top_slowest ; $i++) { my $db = " database: $top_slowest[$i]->[3]" if ($top_slowest[$i]->[3]); $db .= ", user: $top_slowest[$i]->[4]" if ($top_slowest[$i]->[4]); $db .= ", remote: $top_slowest[$i]->[5]" if ($top_slowest[$i]->[5]); $db .= ", app: $top_slowest[$i]->[6]" if ($top_slowest[$i]->[6]); $db .= ", bind query: yes" if ($top_slowest[$i]->[7]); $db =~ s/^, //; print $fh $i + 1, ") " . &convert_time($top_slowest[$i]->[0]) . 
"$db - $top_slowest[$i]->[2]\n"; print $fh "--\n"; } print $fh "\n- Queries that took up the most time (N) -------------------------------\n\n"; print $fh "Rank Total duration Times executed Min/Max/Avg duration (s) Query\n"; my $idx = 1; foreach my $k (sort {$normalyzed_info{$b}{duration} <=> $normalyzed_info{$a}{duration}} keys %normalyzed_info) { next if (!$normalyzed_info{$k}{count}); last if ($idx > $top); my $q = $k; if ($normalyzed_info{$k}{count} == 1) { foreach (keys %{$normalyzed_info{$k}{samples}}) { $q = $normalyzed_info{$k}{samples}{$_}{query}; last; } } $normalyzed_info{$k}{average} = $normalyzed_info{$k}{duration} / $normalyzed_info{$k}{count}; print $fh "$idx) " . &convert_time($normalyzed_info{$k}{duration}) . " - " . &comma_numbers($normalyzed_info{$k}{count}) . " - " . &convert_time($normalyzed_info{$k}{min}) . "/" . &convert_time($normalyzed_info{$k}{max}) . "/" . &convert_time($normalyzed_info{$k}{average}) . " - $q\n"; print $fh "--\n"; my $j = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { last if ($j > $sample); my $db = " - database: $normalyzed_info{$k}{samples}{$d}{db}" if ($normalyzed_info{$k}{samples}{$d}{db}); $db .= ", user: $normalyzed_info{$k}{samples}{$d}{user}" if ($normalyzed_info{$k}{samples}{$d}{user}); $db .= ", remote: $normalyzed_info{$k}{samples}{$d}{remote}" if ($normalyzed_info{$k}{samples}{$d}{remote}); $db .= ", app: $normalyzed_info{$k}{samples}{$d}{app}" if ($normalyzed_info{$k}{samples}{$d}{app}); $db .= ", bind query: yes" if ($normalyzed_info{$k}{samples}{$d}{bind}); $db =~ s/^, / - /; print $fh "\t- Example $j: ", &convert_time($d), "$db - ", $normalyzed_info{$k}{samples}{$d}{query}, "\n"; $j++; } $idx++; } } if (!$disable_query && (scalar keys %normalyzed_info > 0)) { print $fh "\n- Most frequent queries (N) --------------------------------------------\n\n"; print $fh "Rank Times executed Total duration Min/Max/Avg duration (s) Query\n"; my $idx = 1; foreach my $k (sort {$normalyzed_info{$b}{count} <=> $normalyzed_info{$a}{count}} keys %normalyzed_info) { next if (!$normalyzed_info{$k}{count}); last if ($idx > $top); my $q = $k; if ($normalyzed_info{$k}{count} == 1) { foreach (keys %{$normalyzed_info{$k}{samples}}) { $q = $normalyzed_info{$k}{samples}{$_}{query}; last; } } print $fh "$idx) " . &comma_numbers($normalyzed_info{$k}{count}) . " - " . &convert_time($normalyzed_info{$k}{duration}) . " - " . &convert_time($normalyzed_info{$k}{min}) . "/" . &convert_time($normalyzed_info{$k}{max}) . "/" . &convert_time($normalyzed_info{$k}{duration} / $normalyzed_info{$k}{count}) . 
" - $q\n"; print $fh "--\n"; my $i = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { last if ($i > $sample); my $db = " - database: $normalyzed_info{$k}{samples}{$d}{db}" if ($normalyzed_info{$k}{samples}{$d}{db}); $db .= ", user: $normalyzed_info{$k}{samples}{$d}{user}" if ($normalyzed_info{$k}{samples}{$d}{user}); $db .= ", remote: $normalyzed_info{$k}{samples}{$d}{remote}" if ($normalyzed_info{$k}{samples}{$d}{remote}); $db .= ", app: $normalyzed_info{$k}{samples}{$d}{app}" if ($normalyzed_info{$k}{samples}{$d}{app}); $db .= ", bind query: yes" if ($normalyzed_info{$k}{samples}{$d}{bind}); $db =~ s/^, / - /; print $fh "\tExample $i: ", &convert_time($d), "$db - ", $normalyzed_info{$k}{samples}{$d}{query}, "\n"; $i++; } $idx++; } } if (!$disable_query && ($#top_slowest >= 0)) { print $fh "\n- Slowest queries (N) --------------------------------------------------\n\n"; print $fh "Rank Min/Max/Avg duration (s) Times executed Total duration Query\n"; my $idx = 1; foreach my $k (sort {$normalyzed_info{$b}{average} <=> $normalyzed_info{$a}{average}} keys %normalyzed_info) { next if (!$normalyzed_info{$k}{count}); last if ($idx > $top); my $q = $k; if ($normalyzed_info{$k}{count} == 1) { foreach (keys %{$normalyzed_info{$k}{samples}}) { $q = $normalyzed_info{$k}{samples}{$_}{query}; last; } } print $fh "$idx) " . &convert_time($normalyzed_info{$k}{min}) . "/" . &convert_time($normalyzed_info{$k}{max}) . "/" . &convert_time($normalyzed_info{$k}{average}) . " - " . &comma_numbers($normalyzed_info{$k}{count}) . " - " . &convert_time($normalyzed_info{$k}{duration}) . " - $q\n"; print $fh "--\n"; my $i = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { last if ($i > $sample); my $db = " - database: $normalyzed_info{$k}{samples}{$d}{db}" if ($normalyzed_info{$k}{samples}{$d}{db}); $db .= ", user: $normalyzed_info{$k}{samples}{$d}{user}" if ($normalyzed_info{$k}{samples}{$d}{user}); $db .= ", remote: $normalyzed_info{$k}{samples}{$d}{remote}" if ($normalyzed_info{$k}{samples}{$d}{remote}); $db .= ", app: $normalyzed_info{$k}{samples}{$d}{app}" if ($normalyzed_info{$k}{samples}{$d}{app}); $db .= ", bind query: yes" if ($normalyzed_info{$k}{samples}{$d}{yes}); $db =~ s/^, / - /; print $fh "\tExample $i: ", &convert_time($d), "$db - ", $normalyzed_info{$k}{samples}{$d}{query}, "\n"; $i++; } $idx++; } } @top_slowest = (); if (!$disable_error) { &show_error_as_text(); } # Show pgbouncer session per database statistics if (exists $pgb_session_info{database}) { print $fh "\n- pgBouncer sessions per database --------------------------------------------\n\n"; print $fh "Database Count Total Duration Avg duration (s)\n"; foreach my $d (sort keys %{$pgb_session_info{database}}) { print $fh "$d - ", &comma_numbers($pgb_session_info{database}{$d}{count}), " ", &convert_time($pgb_session_info{database}{$d}{duration}), " ", &convert_time($pgb_session_info{database}{$d}{duration} / $pgb_session_info{database}{$d}{count}), "\n"; } } # Show pgbouncer session per user statistics if (exists $pgb_session_info{user}) { print $fh "\n- pgBouncer sessions per user ------------------------------------------------\n\n"; print $fh "User Count Total Duration Avg duration (s)\n"; foreach my $d (sort keys %{$pgb_session_info{user}}) { print $fh "$d - ", &comma_numbers($pgb_session_info{user}{$d}{count}), " ", &convert_time($pgb_session_info{user}{$d}{duration}), " ", &convert_time($pgb_session_info{user}{$d}{duration} / $pgb_session_info{user}{$d}{count}), "\n"; } } # Show 
pgbouncer session per host statistics if (exists $pgb_session_info{host}) { print $fh "\n- pgBouncer sessions per host ------------------------------------------------\n\n"; print $fh "User Count Total Duration Avg duration (s)\n"; foreach my $d (sort keys %{$pgb_session_info{host}}) { print $fh "$d - ", &comma_numbers($pgb_session_info{host}{$d}{count}), " ", &convert_time($pgb_session_info{host}{$d}{duration}), " ", &convert_time($pgb_session_info{host}{$d}{duration} / $pgb_session_info{host}{$d}{count}), "\n"; } } # Show pgbouncer session per application statistics if (exists $pgb_session_info{app}) { print $fh "\n- pgBouncer sessions per application -----------------------------------------\n\n"; print $fh "Application Count Total Duration Avg duration (s)\n"; foreach my $d (sort keys %{$pgb_session_info{app}}) { print $fh "$d - ", &comma_numbers($pgb_session_info{app}{$d}{count}), " ", &convert_time($pgb_session_info{app}{$d}{duration}), " ", &convert_time($pgb_session_info{app}{$d}{duration} / $pgb_session_info{app}{$d}{count}), "\n"; } } # Show pgbouncer connection per database statistics if (exists $pgb_connection_info{database}) { print $fh "\n- pgBouncer connections per database -----------------------------------------\n\n"; print $fh "Database User Count\n"; foreach my $d (sort keys %{$pgb_connection_info{database}}) { print $fh "$d - ", &comma_numbers($pgb_connection_info{database}{$d}), "\n"; foreach my $u (sort keys %{$pgb_connection_info{user}}) { next if (!exists $pgb_connection_info{database_user}{$d}{$u}); print $fh "\t$u ", &comma_numbers($pgb_connection_info{database_user}{$d}{$u}), "\n"; } } } # Show pgbouncer connection per user statistics if (exists $pgb_connection_info{user}) { print $fh "\n- pgBouncer connections per user ---------------------------------------------\n\n"; print $fh "User Count\n"; foreach my $d (sort keys %{$pgb_connection_info{user}}) { print $fh "$d - ", &comma_numbers($pgb_connection_info{user}{$d}), "\n"; } } # Show pgbouncer connection per host statistics if (exists $pgb_connection_info{host}) { print $fh "\n- pgBouncer connections per host --------------------------------------------\n\n"; print $fh "User Count\n"; foreach my $d (sort keys %{$pgb_connection_info{host}}) { print $fh "$d - ", &comma_numbers($pgb_connection_info{host}{$d}), "\n"; } } if (!$disable_error) { &show_pgb_error_as_text(); } print $fh "\n\n"; print $fh "Report generated by pgBadger $VERSION ($project_url).\n"; } sub dump_error_as_text { # Global information my $curdate = localtime(time); my $fmt_nlines = &comma_numbers($nlines); my $total_time = timestr($td); $total_time =~ s/^([\.0-9]+) wallclock.*/$1/; $total_time = &convert_time($total_time * 1000); my $logfile_str = $log_files[0]; if ($#log_files > 0) { $logfile_str .= ', ..., ' . 
$log_files[-1]; } $report_title ||= 'PostgreSQL Log Analyzer'; print $fh qq{ pgBadger :: $report_title - Global information --------------------------------------------------- Generated on $curdate Log file: $logfile_str Parsed $fmt_nlines log entries in $total_time Log start from $overall_stat{'first_log_ts'} to $overall_stat{'last_log_ts'} }; &show_error_as_text(); print $fh "\n\n"; &show_pgb_error_as_text(); print $fh "\n\n"; print $fh "Report generated by pgBadger $VERSION ($project_url).\n"; } # We change temporary log level from LOG to ERROR # to store these messages into the error report sub change_log_level { my $msg = shift; return 1 if ($msg =~ /parameter "[^"]+" changed to "[^"]+"/); return 1 if ($msg =~ /database system was/); return 1 if ($msg =~ /recovery has paused/); return 1 if ($msg =~ /ending cancel to blocking autovacuum/); return 1 if ($msg =~ /skipping analyze of/); return 1 if ($msg =~ /using stale statistics/); return 1 if ($msg =~ /replication command:/); return 1 if ($msg =~ /still waiting for/); return 0; } sub revert_log_level { my $msg = shift; return ($msg, 1) if ($msg =~ s/ERROR: (parameter "[^"]+" changed to)/LOG: $1/); return ($msg, 1) if ($msg =~ s/ERROR: (database system was)/LOG: $1/); return ($msg, 1) if ($msg =~ s/ERROR: (recovery has paused)/LOG: $1/); return ($msg, 1) if ($msg =~ s/ERROR: (sending cancel to blocking autovacuum)/LOG: $1/); return ($msg, 1) if ($msg =~ s/ERROR: (skipping analyze of)/LOG: $1/); return ($msg, 1) if ($msg =~ s/ERROR: (using stale statistics)/LOG: $1/); return ($msg, 1) if ($msg =~ s/ERROR: (received replication command:)/LOG: $1/); return ($msg, 1) if ($msg =~ s/ERROR: (.*still waiting for)/LOG: $1/); return ($msg, 0); } sub show_error_as_text { return if (scalar keys %error_info == 0); print $fh "\n- Most frequent events (N) ---------------------------------------------\n\n"; my $idx = 1; foreach my $k (sort {$error_info{$b}{count} <=> $error_info{$a}{count}} keys %error_info) { next if (!$error_info{$k}{count}); last if ($idx > $top); last if (!$error_info{$k}{count}); my ($msg, $ret) = &revert_log_level($k); if ($error_info{$k}{count} > 1) { print $fh "$idx) " . &comma_numbers($error_info{$k}{count}) . " - $msg\n"; print $fh "--\n"; my $j = 1; for (my $i = 0 ; $i <= $#{$error_info{$k}{date}} ; $i++) { last if ($i == $sample); ($error_info{$k}{error}[$i], $ret) = &revert_log_level($error_info{$k}{error}[$i]); if ($msg) { $logs_type{ERROR}--; $logs_type{LOG}++; } print $fh "\t- Example $j: $error_info{$k}{date}[$i] - $error_info{$k}{error}[$i]\n"; print $fh "\t\tDetail: $error_info{$k}{detail}[$i]\n" if ($error_info{$k}{detail}[$i]); print $fh "\t\tContext: $error_info{$k}{context}[$i]\n" if ($error_info{$k}{context}[$i]); print $fh "\t\tHint: $error_info{$k}{hint}[$i]\n" if ($error_info{$k}{hint}[$i]); print $fh "\t\tStatement: $error_info{$k}{statement}[$i]\n" if ($error_info{$k}{statement}[$i]); print $fh "\t\tDatabase: $error_info{$k}{db}[$i]\n" if ($error_info{$k}{db}[$i]); $j++; } } elsif ($error_info{$k}{error}[0]) { ($error_info{$k}{error}[0], $ret) = &revert_log_level($error_info{$k}{error}[0]); if ($msg) { $logs_type{ERROR}--; $logs_type{LOG}++; } if ($sample) { print $fh "$idx) " . &comma_numbers($error_info{$k}{count}) . 
" - $error_info{$k}{error}[0]\n"; print $fh "--\n"; print $fh "\t- Date: $error_info{$k}{date}[0]\n"; print $fh "\t\tDetail: $error_info{$k}{detail}[0]\n" if ($error_info{$k}{detail}[0]); print $fh "\t\tContext: $error_info{$k}{context}[0]\n" if ($error_info{$k}{context}[0]); print $fh "\t\tHint: $error_info{$k}{hint}[0]\n" if ($error_info{$k}{hint}[0]); print $fh "\t\tStatement: $error_info{$k}{statement}[0]\n" if ($error_info{$k}{statement}[0]); print $fh "\t\tDatabase: $error_info{$k}{db}[0]\n" if ($error_info{$k}{db}[0]); } else { print $fh "$idx) " . &comma_numbers($error_info{$k}{count}) . " - $msg\n"; print $fh "--\n"; } } $idx++; } if (scalar keys %logs_type > 0) { print $fh "\n- Logs per type ---------------------------------------------\n\n"; my $total_logs = 0; foreach my $d (keys %logs_type) { $total_logs += $logs_type{$d}; } print $fh "Logs type Count Percentage\n"; foreach my $d (sort keys %logs_type) { next if (!$logs_type{$d}); print $fh "$d\t\t", &comma_numbers($logs_type{$d}), "\t", sprintf("%0.2f", ($logs_type{$d} * 100) / $total_logs), "%\n"; } } if (scalar keys %errors_code > 0) { print $fh "\n- Logs per type ---------------------------------------------\n\n"; my $total_logs = 0; foreach my $d (keys %errors_code) { $total_logs += $errors_code{$d}; } print $fh "Errors class code Count Percentage\n"; foreach my $d (sort keys %errors_code) { next if (!$errors_code{$d}); print $fh "$CLASS_ERROR_CODE{$d}\t$d\t\t", &comma_numbers($errors_code{$d}), "\t", sprintf("%0.2f", ($errors_code{$d} * 100) / $total_logs), "%\n"; } } } sub show_pgb_error_as_text { return if (scalar keys %pgb_error_info == 0); print $fh "\n- Most frequent events (N) ---------------------------------------------\n\n"; my $idx = 1; foreach my $k (sort {$pgb_error_info{$b}{count} <=> $pgb_error_info{$a}{count}} keys %pgb_error_info) { next if (!$pgb_error_info{$k}{count}); last if ($idx > $top); my $msg = $k; if ($pgb_error_info{$k}{count} > 1) { print $fh "$idx) " . &comma_numbers($pgb_error_info{$k}{count}) . " - $msg\n"; print $fh "--\n"; my $j = 1; for (my $i = 0 ; $i <= $#{$pgb_error_info{$k}{date}} ; $i++) { last if ($i == $sample); print $fh "\t- Example $j: $pgb_error_info{$k}{date}[$i] - $pgb_error_info{$k}{error}[$i]\n"; print $fh "\t\tDatabase: $pgb_error_info{$k}{db}[$i]\n" if ($pgb_error_info{$k}{db}[$i]); print $fh "\t\tUser: $pgb_error_info{$k}{user}[$i]\n" if ($pgb_error_info{$k}{user}[$i]); print $fh "\t\tClient: $pgb_error_info{$k}{remote}[$i]\n" if ($pgb_error_info{$k}{remote}[$i]); $j++; } } else { if ($sample) { print $fh "$idx) " . &comma_numbers($pgb_error_info{$k}{count}) . " - $pgb_error_info{$k}{error}[0]\n"; print $fh "--\n"; print $fh "\t- Date: $pgb_error_info{$k}{date}[0]\n"; print $fh "\t\tDatabase: $pgb_error_info{$k}{db}[0]\n" if ($pgb_error_info{$k}{db}[0]); print $fh "\t\tUser: $pgb_error_info{$k}{user}[0]\n" if ($pgb_error_info{$k}{user}[0]); print $fh "\t\tClient: $pgb_error_info{$k}{remote}[0]\n" if ($pgb_error_info{$k}{remote}[0]); } else { print $fh "$idx) " . &comma_numbers($pgb_error_info{$k}{count}) . " - $msg\n"; print $fh "--\n"; } } $idx++; } } sub html_header { my $uri = shift; my $date = localtime(time); my $global_info = &print_global_information(); my @tmpjscode = @jscode; map { s/EDIT_URI/$uri/; } @tmpjscode; my $local_title = 'PostgreSQL Log Analyzer'; if ($report_title) { $local_title = $report_title; } $report_title ||= 'pgBadger'; print $fh qq{ pgBadger :: $local_title @tmpjscode


  • Global information

    $global_info
  • }; } sub html_footer { print $fh qq{
}; } # Create global information section sub print_global_information { my $curdate = localtime(time); my $fmt_nlines = &comma_numbers($nlines); my $t3 = Benchmark->new; my $td = timediff($t3, $t0); my $total_time = timestr($td); $total_time =~ s/^([\.0-9]+) wallclock.*/$1/; $total_time = &convert_time($total_time * 1000); my $logfile_str = $log_files[0]; if ($#log_files > 0) { $logfile_str .= ', ..., ' . $log_files[-1]; } return qq{
  • Generated on $curdate
  • Log file: $logfile_str
  • Parsed $fmt_nlines log entries in $total_time
  • Log start from $overall_stat{'first_log_ts'} to $overall_stat{'last_log_ts'}
}; } sub print_overall_statistics { my $fmt_unique = &comma_numbers(scalar keys %normalyzed_info); my $fmt_queries = &comma_numbers($overall_stat{'queries_number'}); my $fmt_duration = &convert_time($overall_stat{'queries_duration'}); $overall_stat{'first_query_ts'} ||= '-'; $overall_stat{'last_query_ts'} ||= '-'; my $query_peak = 0; my $query_peak_date = ''; foreach (sort {$overall_stat{'peak'}{$b}{query} <=> $overall_stat{'peak'}{$a}{query}} keys %{$overall_stat{'peak'}}) { $query_peak = &comma_numbers($overall_stat{'peak'}{$_}{query}); $query_peak_date = $_ if ($query_peak); last; } my $avg_queries = &comma_numbers(int($overall_stat{'queries_number'}/($session_info{count} || 1))); my $avg_duration = &convert_time(int($overall_stat{'queries_duration'}/($session_info{count} || 1))); my $fmt_errors = &comma_numbers($overall_stat{'errors_number'}); my $fmt_unique_error = &comma_numbers(scalar keys %error_info); my $autovacuum_count = &comma_numbers($autovacuum_info{count}); my $autoanalyze_count = &comma_numbers($autoanalyze_info{count}); my $tempfile_count = &comma_numbers($tempfile_info{count}); my $cancelled_count = &comma_numbers($cancelled_info{count}); my $fmt_temp_maxsise = &pretty_print_size($tempfile_info{maxsize}); my $fmt_temp_avsize = &pretty_print_size(sprintf("%.2f", $tempfile_info{size} / ($tempfile_info{count} || 1))); my $session_count = &comma_numbers($session_info{count}); my $avg_session_duration = &convert_time($session_info{duration} / ($session_info{count} || 1)); my $tot_session_duration = &convert_time($session_info{duration}); my $connection_count = &comma_numbers($connection_info{count}); my $connection_peak = 0; my $connection_peak_date = ''; my $session_peak = 0; my $session_peak_date = ''; foreach (sort {$overall_stat{'peak'}{$b}{connection} <=> $overall_stat{'peak'}{$a}{connection}} keys %{$overall_stat{'peak'}}) { $connection_peak = &comma_numbers($overall_stat{'peak'}{$_}{connection}); $connection_peak_date = $_ if ($connection_peak); last; } foreach (sort {$overall_stat{'peak'}{$b}{session} <=> $overall_stat{'peak'}{$a}{session}} keys %{$overall_stat{'peak'}}) { next if (!$session_count); $session_peak = &comma_numbers($overall_stat{'peak'}{$_}{session}); $session_peak_date = $_ if ($session_peak); last; } my $main_error = 0; my $total = 0; foreach my $k (sort {$error_info{$b}{count} <=> $error_info{$a}{count}} keys %error_info) { next if (!$error_info{$k}{count}); $main_error = &comma_numbers($error_info{$k}{count}) if (!$main_error); $total += $error_info{$k}{count}; } $total = &comma_numbers($total); my $db_count = scalar keys %database_info; print $fh qq{

Overview

Global Stats

  • $fmt_unique Number of unique normalized queries
  • $fmt_queries Number of queries
  • $fmt_duration Total query duration
  • $overall_stat{'first_query_ts'} First query
  • $overall_stat{'last_query_ts'} Last query
  • $query_peak queries/s at $query_peak_date Query peak
  • $fmt_errors Number of events
  • $fmt_unique_error Number of unique normalized events
  • $main_error Max number of times the same event was reported
  • $cancelled_count Number of cancellations
  • $autovacuum_count Total number of automatic vacuums
  • $autoanalyze_count Total number of automatic analyzes
  • $tempfile_count Number of temporary files
  • $fmt_temp_maxsise Max size of temporary file
  • $fmt_temp_avsize Average size of temporary file
  • $session_count Total number of sessions
  • $session_peak sessions at $session_peak_date Session peak
  • $tot_session_duration Total duration of sessions
  • $avg_session_duration Average duration of sessions
  • $avg_queries Average queries per session
  • $avg_duration Average query duration per session
  • $connection_count Total number of connections
  • }; if ($connection_count) { print $fh qq{
  • $connection_peak connections/s at $connection_peak_date Connection peak
  • }; } print $fh qq{
  • $db_count Total number of databases
}; } sub print_general_activity { my $queries = ''; my $select_queries = ''; my $write_queries = ''; my $prepared_queries = ''; my $connections = ''; my $sessions = ''; foreach my $d (sort {$a <=> $b} keys %per_minute_info) { my $c = 1; $d =~ /^\d{4}(\d{2})(\d{2})$/; my $zday = "$abbr_month{$1} $2"; foreach my $h (sort {$a <=> $b} keys %{$per_minute_info{$d}}) { my %cur_period_info = (); my $read_average_duration = 0; my $read_average_count = 0; my $write_average_duration = 0; my $write_average_count = 0; my %all_query_duration=(); foreach my $m (keys %{$per_minute_info{$d}{$h}}) { $cur_period_info{count} += ($per_minute_info{$d}{$h}{$m}{query}{count} || 0); $cur_period_info{duration} += ($per_minute_info{$d}{$h}{$m}{query}{duration} || 0); $cur_period_info{min} = $per_minute_info{$d}{$h}{$m}{query}{min} if (!exists $cur_period_info{min} || ($per_minute_info{$d}{$h}{$m}{query}{min} < $cur_period_info{min})); $cur_period_info{max} = $per_minute_info{$d}{$h}{$m}{query}{max} if (!exists $cur_period_info{max} || ($per_minute_info{$d}{$h}{$m}{query}{max} > $cur_period_info{max})); push(@{$all_query_duration{'query'}}, $per_minute_info{$d}{$h}{$m}{query}{duration}||0); foreach my $a (@SQL_ACTION) { $cur_period_info{$a}{count} += ($per_minute_info{$d}{$h}{$m}{$a}{count} || 0); $cur_period_info{$a}{duration} += ($per_minute_info{$d}{$h}{$m}{$a}{duration} || 0); push(@{$all_query_duration{$a}}, $per_minute_info{$d}{$h}{$m}{$a}{duration}||0); $cur_period_info{usual} += ($per_minute_info{$d}{$h}{$m}{$a}{count} || 0); } $cur_period_info{prepare} += ($per_minute_info{$d}{$h}{$m}{prepare} || 0); $cur_period_info{execute} += ($per_minute_info{$d}{$h}{$m}{execute} || 0); } $cur_period_info{average} = $cur_period_info{duration} / ($cur_period_info{count} || 1); $cur_period_info{'SELECT'}{average} = $cur_period_info{'SELECT'}{duration} / ($cur_period_info{'SELECT'}{count} || 1); $read_average_duration = ($cur_period_info{'SELECT'}{duration} + $cur_period_info{'COPY TO'}{duration}); $read_average_count = ($cur_period_info{'SELECT'}{count} + $cur_period_info{'COPY TO'}{count}); $write_average_duration = ($cur_period_info{'INSERT'}{duration} + $cur_period_info{'UPDATE'}{duration} + $cur_period_info{'DELETE'}{duration} + $cur_period_info{'COPY FROM'}{duration}); $write_average_count = ($cur_period_info{'INSERT'}{count} + $cur_period_info{'UPDATE'}{count} + $cur_period_info{'DELETE'}{count} + $cur_period_info{'COPY FROM'}{count}); $zday = " " if ($c > 1); $c++; my $count = &comma_numbers($cur_period_info{count}); my $min = &convert_time($cur_period_info{min}); my $max = &convert_time($cur_period_info{max}); my $average = &convert_time($cur_period_info{average}); my %percentile = (); foreach my $lp (@LATENCY_PERCENTILE) { $cur_period_info{$lp}{percentileindex} = int(@{$all_query_duration{'query'}} * $lp / 100) ; @{$all_query_duration{'query'}}= sort{ $a <=> $b } @{$all_query_duration{'query'}}; $cur_period_info{$lp}{percentile} = $all_query_duration{'query'}[$cur_period_info{$lp}{percentileindex}]; $percentile{$lp} = &convert_time($cur_period_info{$lp}{percentile}); @{$all_query_duration{'READ'}}= sort{ $a <=> $b } (@{$all_query_duration{'SELECT'}}, @{$all_query_duration{'COPY TO'}}); $cur_period_info{'READ'}{$lp}{percentileindex} = int(@{$all_query_duration{'READ'}} * $lp / 100) ; $cur_period_info{'READ'}{$lp}{percentile} = $all_query_duration{'READ'}[$cur_period_info{'READ'}{$lp}{percentileindex}]; $percentile{'READ'}{$lp} = &convert_time($cur_period_info{'READ'}{$lp}{percentile}); 
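# Percentile computation note: this is a nearest-rank style estimate. The
# per-hour duration samples are sorted ascending and the p-th percentile is
# read at index int(N * p / 100); e.g. with N = 200 samples the 95th
# percentile is the value at index 190. READ merges SELECT and COPY TO
# durations; WRITE (computed just below) merges INSERT, UPDATE, DELETE and
# COPY FROM durations before the same index lookup is applied.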
@{$all_query_duration{'WRITE'}}= sort{ $a <=> $b } (@{$all_query_duration{'INSERT'}},@{$all_query_duration{'UPDATE'}},@{$all_query_duration{'DELETE'}},@{$all_query_duration{'COPY FROM'}}); $cur_period_info{'WRITE'}{$lp}{percentileindex} = int(@{$all_query_duration{'WRITE'}} * $lp / 100) ; $cur_period_info{'WRITE'}{$lp}{percentile} = $all_query_duration{'WRITE'}[$cur_period_info{'WRITE'}{$lp}{percentileindex}]; $percentile{'WRITE'}{$lp} = &convert_time($cur_period_info{'WRITE'}{$lp}{percentile}); } $queries .= qq{ $zday $h $count $min $max $average }; foreach my $lp (@LATENCY_PERCENTILE) { $queries .= "$percentile{$lp}\n"; } $queries .= qq{ }; $count = &comma_numbers($cur_period_info{'SELECT'}{count}); my $copyto_count = &comma_numbers($cur_period_info{'COPY TO'}{count}); $average = &convert_time($read_average_duration / ($read_average_count || 1)); $select_queries .= qq{ $zday $h $count $copyto_count $average }; foreach my $lp (@LATENCY_PERCENTILE) { $select_queries .= "$percentile{'READ'}{$lp}\n"; } $select_queries .= qq{ }; my $insert_count = &comma_numbers($cur_period_info{'INSERT'}{count}); my $update_count = &comma_numbers($cur_period_info{'UPDATE'}{count}); my $delete_count = &comma_numbers($cur_period_info{'DELETE'}{count}); my $copyfrom_count = &comma_numbers($cur_period_info{'COPY FROM'}{count}); my $write_average = &convert_time($write_average_duration / ($write_average_count || 1)); $write_queries .= qq{ $zday $h $insert_count $update_count $delete_count $copyfrom_count $write_average} ; foreach my $lp (@LATENCY_PERCENTILE) { $write_queries .= "$percentile{'WRITE'}{$lp}\n"; } $write_queries .= qq{ }; my $prepare_count = &comma_numbers($cur_period_info{prepare}); my $execute_count = &comma_numbers($cur_period_info{execute}); my $bind_prepare = &comma_numbers(sprintf("%.2f", $cur_period_info{execute}/($cur_period_info{prepare}||1))); my $prepare_usual = &comma_numbers(sprintf("%.2f", ($cur_period_info{prepare}/($cur_period_info{usual}||1)) * 100)) . "%"; $prepared_queries .= qq{ $zday $h $prepare_count $execute_count $bind_prepare $prepare_usual }; $count = &comma_numbers($connection_info{chronos}{"$d"}{"$h"}{count}); $average = &comma_numbers(sprintf("%0.2f", $connection_info{chronos}{"$d"}{"$h"}{count} / 3600)); $connections .= qq{ $zday $h $count $average/s }; $count = &comma_numbers($session_info{chronos}{"$d"}{"$h"}{count}); $cur_period_info{'session'}{average} = $session_info{chronos}{"$d"}{"$h"}{duration} / ($session_info{chronos}{"$d"}{"$h"}{count} || 1); $average = &convert_time($cur_period_info{'session'}{average}); $sessions .= qq{ $zday $h $count $average }; } } # Set default values $queries = qq{$NODATA} if (!$queries); $select_queries = qq{$NODATA} if (!$select_queries); $write_queries = qq{$NODATA} if (!$write_queries); $prepared_queries = qq{$NODATA} if (!$prepared_queries); $connections = qq{$NODATA} if (!$connections); $sessions = qq{$NODATA} if (!$sessions); print $fh qq{

General Activity

$queries
Day Hour Count Min duration Max duration Avg duration Latency Percentile(90) Latency Percentile(95) Latency Percentile(99)
$select_queries
Day Hour SELECT COPY TO Average Duration Latency Percentile(90) Latency Percentile(95) Latency Percentile(99)
$write_queries
Day Hour INSERT UPDATE DELETE COPY FROM Average Duration Latency Percentile(90) Latency Percentile(95) Latency Percentile(99)
$prepared_queries
Day Hour Prepare Bind Bind/Prepare Percentage of prepare
$connections
Day Hour Count Average / Second
$sessions
Day Hour Count Average Duration
Back to the top of the General Activity table
}; } sub print_sql_traffic { my $bind_vs_prepared = sprintf("%.2f", $overall_stat{'execute'} / ($overall_stat{'prepare'} || 1)); my $total_usual_queries = 0; map { $total_usual_queries += $overall_stat{$_}; } @SQL_ACTION; my $prepared_vs_normal = sprintf("%.2f", ($overall_stat{'execute'} / ($total_usual_queries || 1))*100); my $query_peak = 0; my $query_peak_date = ''; foreach (sort {$overall_stat{'peak'}{$b}{query} <=> $overall_stat{'peak'}{$a}{query}} keys %{$overall_stat{'peak'}}) { $query_peak = &comma_numbers($overall_stat{'peak'}{$_}{query}); $query_peak_date = $_ if ($query_peak); last; } my $select_peak = 0; my $select_peak_date = ''; foreach (sort {$overall_stat{'peak'}{$b}{select} <=> $overall_stat{'peak'}{$a}{select}} keys %{$overall_stat{'peak'}}) { $select_peak = &comma_numbers($overall_stat{'peak'}{$_}{select}); $select_peak_date = $_ if ($select_peak); last; } my $write_peak = 0; my $write_peak_date = ''; foreach (sort {$overall_stat{'peak'}{$b}{write} <=> $overall_stat{'peak'}{$a}{write}} keys %{$overall_stat{'peak'}}) { $write_peak = &comma_numbers($overall_stat{'peak'}{$_}{write}); $write_peak_date = $_ if ($write_peak); last; } my $fmt_duration = &convert_time($overall_stat{'queries_duration'}); print $fh qq{

SQL Traffic

Key values

  • $query_peak queries/s Query Peak
  • $query_peak_date Date
$drawn_graphs{queriespersecond_graph}
}; delete $drawn_graphs{queriespersecond_graph}; print $fh qq{

SELECT Traffic

Key values

  • $select_peak queries/s Query Peak
  • $select_peak_date Date
$drawn_graphs{selectqueries_graph}
}; delete $drawn_graphs{selectqueries_graph}; print $fh qq{

INSERT/UPDATE/DELETE Traffic

Key values

  • $write_peak queries/s Query Peak
  • $write_peak_date Date
$drawn_graphs{writequeries_graph}
}; delete $drawn_graphs{writequeries_graph}; print $fh qq{

Queries duration

Key values

  • $fmt_duration Total query duration
$drawn_graphs{durationqueries_graph}
}; delete $drawn_graphs{durationqueries_graph}; print $fh qq{

Prepared queries ratio

Key values

  • $bind_vs_prepared Ratio of bind vs prepare
  • $prepared_vs_normal % Ratio between prepared and "usual" statements
$drawn_graphs{bindpreparequeries_graph}
}; delete $drawn_graphs{bindpreparequeries_graph}; } sub print_pgbouncer_stats { my $request_peak = 0; my $request_peak_date = ''; foreach (sort {$pgb_overall_stat{'peak'}{$b}{t_req} <=> $pgb_overall_stat{'peak'}{$a}{t_req}} keys %{$pgb_overall_stat{'peak'}}) { $request_peak = &comma_numbers($pgb_overall_stat{'peak'}{$_}{t_req}); $request_peak_date = $_; last; } my $inbytes_peak = 0; my $inbytes_peak_date = ''; foreach (sort {$pgb_overall_stat{'peak'}{$b}{t_inbytes} <=> $pgb_overall_stat{'peak'}{$a}{t_inbytes}} keys %{$pgb_overall_stat{'peak'}}) { $inbytes_peak = &comma_numbers($pgb_overall_stat{'peak'}{$_}{t_inbytes}); $inbytes_peak_date = $_; last; } my $outbytes_peak = 0; my $outbytes_peak_date = ''; foreach (sort {$pgb_overall_stat{'peak'}{$b}{t_outbytes} <=> $pgb_overall_stat{'peak'}{$a}{t_outbytes}} keys %{$pgb_overall_stat{'peak'}}) { $outbytes_peak = &comma_numbers($pgb_overall_stat{'peak'}{$_}{t_outbytes}); $outbytes_peak_date = $_; last; } my $avgduration_peak = 0; my $avgduration_peak_date = ''; foreach (sort {$pgb_overall_stat{'peak'}{$b}{t_avgduration} <=> $pgb_overall_stat{'peak'}{$a}{t_avgduration}} keys %{$pgb_overall_stat{'peak'}}) { $avgduration_peak = &convert_time($pgb_overall_stat{'peak'}{$_}{t_avgduration}); $avgduration_peak_date = $_; last; } print $fh qq{

Request Throughput

Key values

  • $request_peak requests/s Request Peak
  • $request_peak_date Date
$drawn_graphs{pgb_requestpersecond_graph}
}; delete $drawn_graphs{pgb_requestpersecond_graph}; print $fh qq{

Bytes I/O Throughput

Key values

  • $inbytes_peak Bytes/s In Bytes Peak
  • $inbytes_peak_date Date
  • $outbytes_peak Bytes/s Out Bytes Peak
  • $outbytes_peak_date Date
$drawn_graphs{pgb_bytepersecond_graph}
}; delete $drawn_graphs{pgb_bytepersecond_graph}; print $fh qq{

Queries Average duration

Key values

  • $avgduration_peak Average Duration Peak
  • $avgduration_peak_date Date
$drawn_graphs{pgb_avgduration_graph}
}; delete $drawn_graphs{pgb_avgduration_graph}; } sub compute_query_graphs { my %graph_data = (); if ($graph) { foreach my $tm (sort {$a <=> $b} keys %per_minute_info) { $tm =~ /(\d{4})(\d{2})(\d{2})/; my $y = $1 - 1900; my $mo = $2 - 1; my $d = $3; foreach my $h ("00" .. "23") { next if (!exists $per_minute_info{$tm}{$h}); my %q_dataavg = (); my %a_dataavg = (); my %c_dataavg = (); my %s_dataavg = (); my %p_dataavg = (); foreach my $m ("00" .. "59") { next if (!exists $per_minute_info{$tm}{$h}{$m}); my $rd = &average_per_minutes($m, $avg_minutes); if (!exists $p_dataavg{prepare}{"$rd"}) { $p_dataavg{prepare}{"$rd"} = 0; $p_dataavg{execute}{"$rd"} = 0; $q_dataavg{count}{"$rd"} = 0; $q_dataavg{duration}{"$rd"} = 0; $q_dataavg{max}{"$rd"} = 0; $q_dataavg{min}{"$rd"} = 0; if (!$disable_query) { foreach my $action (@SQL_ACTION) { $a_dataavg{$action}{count}{"$rd"} = 0; $a_dataavg{$action}{duration}{"$rd"} = 0; $a_dataavg{$action}{max}{"$rd"} = 0; $a_dataavg{$action}{min}{"$rd"} = 0; } $a_dataavg{write}{count}{"$rd"} = 0; $a_dataavg{write}{duration}{"$rd"} = 0; } $c_dataavg{average}{"$rd"} = 0; $c_dataavg{max}{"$rd"} = 0; $c_dataavg{min}{"$rd"} = 0; $s_dataavg{average}{"$rd"} = 0; $s_dataavg{max}{"$rd"} = 0; $s_dataavg{min}{"$rd"} = 0; } if (exists $per_minute_info{$tm}{$h}{$m}{prepare}) { $p_dataavg{prepare}{"$rd"} += $per_minute_info{$tm}{$h}{$m}{prepare}; } elsif (exists $per_minute_info{$tm}{$h}{$m}{parse}) { $p_dataavg{prepare}{"$rd"} += $per_minute_info{$tm}{$h}{$m}{parse}; } $p_dataavg{execute}{"$rd"} += $per_minute_info{$tm}{$h}{$m}{execute} if (exists $per_minute_info{$tm}{$h}{$m}{execute}); if (exists $per_minute_info{$tm}{$h}{$m}{query}) { # Average per minute $q_dataavg{count}{"$rd"} += $per_minute_info{$tm}{$h}{$m}{query}{count}; if (exists $per_minute_info{$tm}{$h}{$m}{query}{duration}) { $q_dataavg{duration}{"$rd"} += $per_minute_info{$tm}{$h}{$m}{query}{duration}; } # Search minimum and maximum during this minute foreach my $s (keys %{$per_minute_info{$tm}{$h}{$m}{query}{second}}) { $q_dataavg{max}{"$rd"} = $per_minute_info{$tm}{$h}{$m}{query}{second}{$s} if ($per_minute_info{$tm}{$h}{$m}{query}{second}{$s} > $q_dataavg{max}{"$rd"}); $q_dataavg{min}{"$rd"} = $per_minute_info{$tm}{$h}{$m}{query}{second}{$s} if ($per_minute_info{$tm}{$h}{$m}{query}{second}{$s} < $q_dataavg{min}{"$rd"}); } if (!$disable_query) { foreach my $action (@SQL_ACTION) { $a_dataavg{$action}{count}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{$action}{count} || 0); $a_dataavg{$action}{duration}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{$action}{duration} || 0); if ( ($action ne 'SELECT') && exists $per_minute_info{$tm}{$h}{$m}{$action}{count}) { $a_dataavg{write}{count}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{$action}{count} || 0); $a_dataavg{write}{duration}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{$action}{duration} || 0); } # Search minimum and maximum during this minute foreach my $s (keys %{$per_minute_info{$tm}{$h}{$m}{$action}{second}}) { $a_dataavg{$action}{max}{"$rd"} = $per_minute_info{$tm}{$h}{$m}{$action}{second}{$s} if ($per_minute_info{$tm}{$h}{$m}{$action}{second}{$s} > $a_dataavg{$action}{max}{"$rd"}); $a_dataavg{$action}{min}{"$rd"} = $per_minute_info{$tm}{$h}{$m}{$action}{second}{$s} if ($per_minute_info{$tm}{$h}{$m}{$action}{second}{$s} < $a_dataavg{$action}{min}{"$rd"}); } } } } if (exists $per_minute_info{$tm}{$h}{$m}{connection}) { # Average per minute $c_dataavg{average}{"$rd"} += $per_minute_info{$tm}{$h}{$m}{connection}{count}; # Search minimum and maximum during this minute foreach my 
$s (keys %{$per_minute_info{$tm}{$h}{$m}{connection}{second}}) { $c_dataavg{max}{"$rd"} = $per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} if ($per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} > $c_dataavg{max}{"$rd"}); $c_dataavg{min}{"$rd"} = $per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} if ($per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} < $c_dataavg{min}{"$rd"}); } delete $per_minute_info{$tm}{$h}{$m}{connection}; } if (exists $per_minute_info{$tm}{$h}{$m}{session}) { # Average per minute $s_dataavg{average}{"$rd"} += $per_minute_info{$tm}{$h}{$m}{session}{count}; # Search minimum and maximum during this minute foreach my $s (keys %{$per_minute_info{$tm}{$h}{$m}{session}{second}}) { $s_dataavg{max}{"$rd"} = $per_minute_info{$tm}{$h}{$m}{session}{second}{$s} if ($per_minute_info{$tm}{$h}{$m}{session}{second}{$s} > $s_dataavg{max}{"$rd"}); $s_dataavg{min}{"$rd"} = $per_minute_info{$tm}{$h}{$m}{session}{second}{$s} if ($per_minute_info{$tm}{$h}{$m}{session}{second}{$s} < $s_dataavg{min}{"$rd"}); } delete $per_minute_info{$tm}{$h}{$m}{session}; } } foreach my $rd (@avgs) { my $t = timegm_nocheck(0, $rd, $h, $d, $mo, $y) * 1000; $t += ($timezone*1000); next if ($t < $t_min); last if ($t > $t_max); if (exists $q_dataavg{count}) { # Average queries per minute $graph_data{query} .= "[$t, " . int(($q_dataavg{count}{"$rd"} || 0) / (60 * $avg_minutes)) . "],"; # Max queries per minute $graph_data{'query-max'} .= "[$t, " . ($q_dataavg{max}{"$rd"} || 0) . "],"; # Min queries per minute $graph_data{'query-min'} .= "[$t, " . ($q_dataavg{min}{"$rd"} || 0) . "],"; # Average duration per minute $graph_data{query4} .= "[$t, " . sprintf("%.3f", ($q_dataavg{duration}{"$rd"} || 0) / ($q_dataavg{count}{"$rd"} || 1)) . "],"; } if (scalar keys %c_dataavg) { # Average connections per minute $graph_data{conn_avg} .= "[$t, " . int(($c_dataavg{average}{"$rd"} || 0) / (60 * $avg_minutes)) . "],"; # Max connections per minute $graph_data{conn_max} .= "[$t, " . ($c_dataavg{max}{"$rd"} || 0) . "],"; # Min connections per minute $graph_data{conn_min} .= "[$t, " . ($c_dataavg{min}{"$rd"} || 0) . "],"; } if (scalar keys %s_dataavg) { # Average connections per minute $graph_data{sess_avg} .= "[$t, " . int(($s_dataavg{average}{"$rd"} || 0) / (60 * $avg_minutes)) . "],"; # Max connections per minute $graph_data{sess_max} .= "[$t, " . ($s_dataavg{max}{"$rd"} || 0) . "],"; # Min connections per minute $graph_data{sess_min} .= "[$t, " . ($s_dataavg{min}{"$rd"} || 0) . "],"; } if (!$disable_query && (scalar keys %a_dataavg > 0)) { foreach my $action (@SQL_ACTION) { next if ($select_only && ($action ne 'SELECT')); # Average queries per minute $graph_data{"$action"} .= "[$t, " . int(($a_dataavg{$action}{count}{"$rd"} || 0) / (60 * $avg_minutes)) . "],"; if ($action eq 'SELECT') { # Max queries per minute $graph_data{"$action-max"} .= "[$t, " . ($a_dataavg{$action}{max}{"$rd"} || 0) . "],"; # Min queries per minute $graph_data{"$action-min"} .= "[$t, " . ($a_dataavg{$action}{min}{"$rd"} || 0) . "],"; # Average query duration $graph_data{"$action-2"} .= "[$t, " . sprintf("%.3f", ($a_dataavg{$action}{duration}{"$rd"} || 0) / ($a_dataavg{$action}{count}{"$rd"} || 1)) . "],"; } else { # Average query duration $graph_data{"write"} .= "[$t, " . sprintf("%.3f", ($a_dataavg{write}{duration}{"$rd"} || 0) / ($a_dataavg{write}{count}{"$rd"} || 1)) . "],"; } } } if (!$disable_query && (scalar keys %p_dataavg> 0)) { $graph_data{prepare} .= "[$t, " . ($p_dataavg{prepare}{"$rd"} || 0) . 
"],"; $graph_data{execute} .= "[$t, " . ($p_dataavg{execute}{"$rd"} || 0) . "],"; $graph_data{ratio_bind_prepare} .= "[$t, " . sprintf("%.2f", ($p_dataavg{execute}{"$rd"} || 0) / ($p_dataavg{prepare}{"$rd"} || 1)) . "],"; } } } } foreach (keys %graph_data) { $graph_data{$_} =~ s/,$//; } } $drawn_graphs{'queriespersecond_graph'} = &jqplot_linegraph( $graphid++, 'queriespersecond_graph', $graph_data{'query-max'}, $graph_data{query}, $graph_data{'query-min'}, 'Queries per second (' . $avg_minutes . ' minutes average)', 'Queries per second', 'Maximum', 'Average', 'Minimum' ); $drawn_graphs{'connectionspersecond_graph'} = &jqplot_linegraph( $graphid++, 'connectionspersecond_graph', $graph_data{conn_max}, $graph_data{conn_avg}, $graph_data{conn_min}, 'Connections per second (' . $avg_minutes . ' minutes average)', 'Connections per second', 'Maximum', 'Average', 'Minimum' ); $drawn_graphs{'sessionspersecond_graph'} = &jqplot_linegraph( $graphid++, 'sessionspersecond_graph', $graph_data{sess_max}, $graph_data{sess_avg}, $graph_data{sess_min}, 'Number of sessions (' . $avg_minutes . ' minutes average)', 'Sessions', 'Maximum', 'Average', 'Minimum' ); $drawn_graphs{'selectqueries_graph'} = &jqplot_linegraph( $graphid++, 'selectqueries_graph', $graph_data{"SELECT-max"}, $graph_data{"SELECT"}, $graph_data{"SELECT-min"}, 'SELECT queries (' . $avg_minutes . ' minutes period)', 'Queries per second', 'Maximum', 'Average', 'Minimum' ); $drawn_graphs{'writequeries_graph'} = &jqplot_linegraph( $graphid++, 'writequeries_graph', $graph_data{"DELETE"}, $graph_data{"INSERT"}, $graph_data{"UPDATE"}, 'Write queries (' . $avg_minutes . ' minutes period)', 'Queries', 'DELETE queries', 'INSERT queries', 'UPDATE queries' ); if (!$select_only) { $drawn_graphs{'durationqueries_graph'} = &jqplot_linegraph( $graphid++, 'durationqueries_graph', $graph_data{query4}, $graph_data{"SELECT-2"}, $graph_data{write}, 'Average queries duration (' . $avg_minutes . ' minutes average)', 'Duration', 'All queries', 'Select queries', 'Write queries' ); } else { $drawn_graphs{'durationqueries_graph'} = &jqplot_linegraph( $graphid++, 'durationqueries_graph', $graph_data{query4}, '', '', 'Average queries duration (' . $avg_minutes . ' minutes average)', 'Duration', 'Select queries' ); } $drawn_graphs{'bindpreparequeries_graph'} = &jqplot_linegraph( $graphid++, 'bindpreparequeries_graph', $graph_data{prepare}, $graph_data{"execute"}, $graph_data{ratio_bind_prepare}, 'Bind versus prepare statements (' . $avg_minutes . ' minutes average)', 'Number of statements', 'Prepare/Parse', 'Execute/Bind', 'Bind vs prepare' ); } sub compute_pgbouncer_graphs { my %graph_data = (); if ($graph) { foreach my $tm (sort {$a <=> $b} keys %pgb_per_minute_info) { $tm =~ /(\d{4})(\d{2})(\d{2})/; my $y = $1 - 1900; my $mo = $2 - 1; my $d = $3; foreach my $h ("00" .. "23") { next if (!exists $pgb_per_minute_info{$tm}{$h}); my %c_dataavg = (); my %s_dataavg = (); foreach my $m ("00" .. "59") { my $t = timegm_nocheck(0, $m, $h, $d, $mo, $y) * 1000; $t += ($timezone*1000); next if ($t < $t_min); last if ($t > $t_max); # pgBouncer stats are generate each minutes, always keep this interval $graph_data{'request'} .= "[$t, " . ($pgb_per_minute_info{$tm}{$h}{$m}{t_req} || 0) . "],"; $graph_data{'inbytes'} .= "[$t, " . ($pgb_per_minute_info{$tm}{$h}{$m}{t_inbytes} || 0) . "],"; $graph_data{'outbytes'} .= "[$t, " . ($pgb_per_minute_info{$tm}{$h}{$m}{t_outbytes} || 0) . "],"; $graph_data{'avgduration'} .= "[$t, " . ($pgb_per_minute_info{$tm}{$h}{$m}{t_avgduration} || 0) . 
"],"; next if (!exists $pgb_per_minute_info{$tm}{$h}{$m}); my $rd = &average_per_minutes($m, $avg_minutes); if (exists $pgb_per_minute_info{$tm}{$h}{$m}{connection}) { # Average per minute $c_dataavg{average}{"$rd"} += $pgb_per_minute_info{$tm}{$h}{$m}{connection}{count}; # Search minimum and maximum during this minute foreach my $s (keys %{$pgb_per_minute_info{$tm}{$h}{$m}{connection}{second}}) { $c_dataavg{max}{"$rd"} = $pgb_per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} if ($pgb_per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} > $c_dataavg{max}{"$rd"}); $c_dataavg{min}{"$rd"} = $pgb_per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} if ($pgb_per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} < $c_dataavg{min}{"$rd"}); } delete $pgb_per_minute_info{$tm}{$h}{$m}{connection}; } if (exists $pgb_per_minute_info{$tm}{$h}{$m}{session}) { # Average per minute $s_dataavg{average}{"$rd"} += $pgb_per_minute_info{$tm}{$h}{$m}{session}{count}; # Search minimum and maximum during this minute foreach my $s (keys %{$pgb_per_minute_info{$tm}{$h}{$m}{session}{second}}) { $s_dataavg{max}{"$rd"} = $pgb_per_minute_info{$tm}{$h}{$m}{session}{second}{$s} if ($pgb_per_minute_info{$tm}{$h}{$m}{session}{second}{$s} > $s_dataavg{max}{"$rd"}); $s_dataavg{min}{"$rd"} = $pgb_per_minute_info{$tm}{$h}{$m}{session}{second}{$s} if ($pgb_per_minute_info{$tm}{$h}{$m}{session}{second}{$s} < $s_dataavg{min}{"$rd"}); } delete $pgb_per_minute_info{$tm}{$h}{$m}{session}; } } foreach my $rd (@avgs) { my $t = timegm_nocheck(0, $rd, $h, $d, $mo, $y) * 1000; $t += ($timezone*1000); next if ($t < $t_min); last if ($t > $t_max); if (scalar keys %c_dataavg) { # Average connections per minute $graph_data{conn_avg} .= "[$t, " . int(($c_dataavg{average}{"$rd"} || 0) / (60 * $avg_minutes)) . "],"; # Max connections per minute $graph_data{conn_max} .= "[$t, " . ($c_dataavg{max}{"$rd"} || 0) . "],"; # Min connections per minute $graph_data{conn_min} .= "[$t, " . ($c_dataavg{min}{"$rd"} || 0) . "],"; } if (scalar keys %s_dataavg) { # Average connections per minute $graph_data{sess_avg} .= "[$t, " . int(($s_dataavg{average}{"$rd"} || 0) / (60 * $avg_minutes)) . "],"; # Max connections per minute $graph_data{sess_max} .= "[$t, " . ($s_dataavg{max}{"$rd"} || 0) . "],"; # Min connections per minute $graph_data{sess_min} .= "[$t, " . ($s_dataavg{min}{"$rd"} || 0) . "],"; } } } } foreach (keys %graph_data) { $graph_data{$_} =~ s/,$//; } } $drawn_graphs{'pgb_requestpersecond_graph'} = &jqplot_linegraph( $graphid++, 'pgb_requestpersecond_graph', $graph_data{request},'',,'','Request per seconds (1 minute average)', '', 'Request per second'); $drawn_graphs{'pgb_bytepersecond_graph'} = &jqplot_linegraph( $graphid++, 'pgb_bytepersecond_graph', $graph_data{inbytes},$graph_data{'outbytes'},'','Bytes I/O per seconds (1 minute average)', 'size', 'In b/s', 'Out b/s'); $drawn_graphs{'pgb_avgduration_graph'} = &jqplot_linegraph( $graphid++, 'pgb_avgduration_graph', $graph_data{avgduration},'','', 'Average query duration (1 minute average)', 'duration', 'Duration'); $drawn_graphs{'pgb_connectionspersecond_graph'} = &jqplot_linegraph( $graphid++, 'pgb_connectionspersecond_graph', $graph_data{conn_max}, $graph_data{conn_avg}, $graph_data{conn_min}, 'Connections per second (' . $avg_minutes . 
' minutes average)', 'Connections per second', 'Maximum', 'Average', 'Minimum' ); $drawn_graphs{'pgb_sessionspersecond_graph'} = &jqplot_linegraph( $graphid++, 'pgb_sessionspersecond_graph', $graph_data{sess_max}, $graph_data{sess_avg}, $graph_data{sess_min}, 'Number of sessions (' . $avg_minutes . ' minutes average)', 'Sessions', 'Maximum', 'Average', 'Minimum' ); } sub print_established_connection { my $connection_peak = 0; my $connection_peak_date = ''; foreach (sort {$overall_stat{'peak'}{$b}{connection} <=> $overall_stat{'peak'}{$a}{connection}} keys %{$overall_stat{'peak'}}) { $connection_peak = &comma_numbers($overall_stat{'peak'}{$_}{connection}); $connection_peak_date = $_ if ($connection_peak); last; } print $fh qq{

Established Connections

Key values

  • $connection_peak connections Connection Peak
  • $connection_peak_date Date
$drawn_graphs{connectionspersecond_graph}
}; delete $drawn_graphs{connectionspersecond_graph}; } sub print_established_pgb_connection { my $connection_peak = 0; my $connection_peak_date = ''; foreach (sort {$pgb_overall_stat{'peak'}{$b}{connection} <=> $pgb_overall_stat{'peak'}{$a}{connection}} keys %{$pgb_overall_stat{'peak'}}) { $connection_peak = &comma_numbers($pgb_overall_stat{'peak'}{$_}{connection}); $connection_peak_date = $_; last; } print $fh qq{

Established Connections

Key values

  • $connection_peak connections Connection Peak
  • $connection_peak_date Date
$drawn_graphs{pgb_connectionspersecond_graph}
}; delete $drawn_graphs{pgb_connectionspersecond_graph}; } sub print_user_connection { my %infos = (); my $total_count = 0; my $c = 0; my $conn_user_info = ''; my @main_user = ('unknown',0); foreach my $u (sort keys %{$connection_info{user}}) { $conn_user_info .= "$u" . &comma_numbers($connection_info{user}{$u}) . ""; $total_count += $connection_info{user}{$u}; if ($main_user[1] < $connection_info{user}{$u}) { $main_user[0] = $u; $main_user[1] = $connection_info{user}{$u}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$connection_info{user}}) { if ((($connection_info{user}{$d} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $connection_info{user}{$d} || 0; } else { $infos{"Sum connections < $pie_percentage_limit%"} += $connection_info{user}{$d} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; delete $infos{"Sum connections < $pie_percentage_limit%"}; } } $drawn_graphs{userconnections_graph} = &jqplot_piegraph($graphid++, 'graph_userconnections', 'Connections per user', %infos); $total_count = &comma_numbers($total_count); print $fh qq{

Connections per user

Key values

  • $main_user[0] Main User
  • $total_count connections Total
$drawn_graphs{userconnections_graph}
$conn_user_info
User Count
}; delete $drawn_graphs{userconnections_graph}; } sub print_user_pgb_connection { my %infos = (); my $total_count = 0; my $c = 0; my $conn_user_info = ''; my @main_user = ('unknown',0); foreach my $u (sort keys %{$pgb_connection_info{user}}) { $conn_user_info .= "$u" . &comma_numbers($pgb_connection_info{user}{$u}) . ""; $total_count += $pgb_connection_info{user}{$u}; if ($main_user[1] < $pgb_connection_info{user}{$u}) { $main_user[0] = $u; $main_user[1] = $pgb_connection_info{user}{$u}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$pgb_connection_info{user}}) { if ((($pgb_connection_info{user}{$d} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $pgb_connection_info{user}{$d} || 0; } else { $infos{"Sum connections < $pie_percentage_limit%"} += $pgb_connection_info{user}{$d} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; delete $infos{"Sum connections < $pie_percentage_limit%"}; } } $drawn_graphs{pgb_userconnections_graph} = &jqplot_piegraph($graphid++, 'graph_pgb_userconnections', 'Connections per user', %infos); $total_count = &comma_numbers($total_count); print $fh qq{

Connections per user

Key values

  • $main_user[0] Main User
  • $total_count connections Total
$drawn_graphs{pgb_userconnections_graph}
$conn_user_info
User Count
}; delete $drawn_graphs{pgb_userconnections_graph}; } sub print_host_connection { my %infos = (); my $total_count = 0; my $c = 0; my $conn_host_info = ''; my @main_host = ('unknown',0); foreach my $h (sort keys %{$connection_info{host}}) { $conn_host_info .= "$h" . &comma_numbers($connection_info{host}{$h}) . ""; $total_count += $connection_info{host}{$h}; if ($main_host[1] < $connection_info{host}{$h}) { $main_host[0] = $h; $main_host[1] = $connection_info{host}{$h}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$connection_info{host}}) { if ((($connection_info{host}{$d} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $connection_info{host}{$d} || 0; } else { $infos{"Sum connections < $pie_percentage_limit%"} += $connection_info{host}{$d} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; delete $infos{"Sum connections < $pie_percentage_limit%"}; } } $drawn_graphs{hostconnections_graph} = &jqplot_piegraph($graphid++, 'graph_hostconnections', 'Connections per host', %infos); $total_count = &comma_numbers($total_count); print $fh qq{

Connections per host

Key values

  • $main_host[0] Main host with $main_host[1] connections
  • $total_count Total connections
$drawn_graphs{hostconnections_graph}
$conn_host_info
Host Count
}; delete $drawn_graphs{hostconnections_graph}; } sub print_host_pgb_connection { my %infos = (); my $total_count = 0; my $c = 0; my $conn_host_info = ''; my @main_host = ('unknown',0); foreach my $h (sort keys %{$pgb_connection_info{host}}) { $conn_host_info .= "$h" . &comma_numbers($pgb_connection_info{host}{$h}) . ""; $total_count += $pgb_connection_info{host}{$h}; if ($main_host[1] < $pgb_connection_info{host}{$h}) { $main_host[0] = $h; $main_host[1] = $pgb_connection_info{host}{$h}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$pgb_connection_info{host}}) { if ((($pgb_connection_info{host}{$d} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $pgb_connection_info{host}{$d} || 0; } else { $infos{"Sum connections < $pie_percentage_limit%"} += $pgb_connection_info{host}{$d} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; delete $infos{"Sum connections < $pie_percentage_limit%"}; } } $drawn_graphs{pgb_hostconnections_graph} = &jqplot_piegraph($graphid++, 'graph_pgb_hostconnections', 'Connections per host', %infos); $total_count = &comma_numbers($total_count); print $fh qq{

Connections per host

Key values

  • $main_host[0] Main host with $main_host[1] connections
  • $total_count Total connections
$drawn_graphs{pgb_hostconnections_graph}
$conn_host_info
Host Count
}; delete $drawn_graphs{pgb_hostconnections_graph}; } sub print_database_connection { my %infos = (); my $total_count = 0; my $conn_database_info = ''; my @main_database = ('unknown',0); foreach my $d (sort keys %{$connection_info{database}}) { $conn_database_info .= "$d " . &comma_numbers($connection_info{database}{$d}) . ""; $total_count += $connection_info{database}{$d}; if ($main_database[1] < $connection_info{database}{$d}) { $main_database[0] = $d; $main_database[1] = $connection_info{database}{$d}; } foreach my $u (sort keys %{$connection_info{user}}) { next if (!exists $connection_info{database_user}{$d}{$u}); $conn_database_info .= " $u" . &comma_numbers($connection_info{database_user}{$d}{$u}) . ""; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$connection_info{database}}) { if ((($connection_info{database}{$d} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $connection_info{database}{$d} || 0; } else { $infos{"Sum connections < $pie_percentage_limit%"} += $connection_info{database}{$d} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; delete $infos{"Sum connections < $pie_percentage_limit%"}; } } $drawn_graphs{databaseconnections_graph} = &jqplot_piegraph($graphid++, 'graph_databaseconnections', 'Connections per database', %infos); $total_count = &comma_numbers($total_count); print $fh qq{

Connections per database

Key values

  • $main_database[0] Main Database
  • $total_count connections Total
$drawn_graphs{databaseconnections_graph}
$conn_database_info
Database User Count
}; delete $drawn_graphs{databaseconnections_graph}; } sub print_database_pgb_connection { my %infos = (); my $total_count = 0; my $conn_database_info = ''; my @main_database = ('unknown',0); foreach my $d (sort keys %{$pgb_connection_info{database}}) { $conn_database_info .= "$d " . &comma_numbers($pgb_connection_info{database}{$d}) . ""; $total_count += $pgb_connection_info{database}{$d}; if ($main_database[1] < $pgb_connection_info{database}{$d}) { $main_database[0] = $d; $main_database[1] = $pgb_connection_info{database}{$d}; } foreach my $u (sort keys %{$pgb_connection_info{user}}) { next if (!exists $pgb_connection_info{database_user}{$d}{$u}); $conn_database_info .= " $u" . &comma_numbers($pgb_connection_info{database_user}{$d}{$u}) . ""; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$pgb_connection_info{database}}) { if ((($pgb_connection_info{database}{$d} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $pgb_connection_info{database}{$d} || 0; } else { $infos{"Sum connections < $pie_percentage_limit%"} += $pgb_connection_info{database}{$d} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; delete $infos{"Sum connections < $pie_percentage_limit%"}; } } $drawn_graphs{pgb_databaseconnections_graph} = &jqplot_piegraph($graphid++, 'graph_pgb_databaseconnections', 'Connections per database', %infos); $total_count = &comma_numbers($total_count); print $fh qq{

Connections per database

Key values

  • $main_database[0] Main Database
  • $total_count connections Total
$drawn_graphs{pgb_databaseconnections_graph}
$conn_database_info
Database User Count
}; delete $drawn_graphs{pgb_databaseconnections_graph}; } sub print_simultaneous_session { my $session_peak = 0; my $session_peak_date = ''; foreach (sort {$overall_stat{'peak'}{$b}{session} <=> $overall_stat{'peak'}{$a}{session}} keys %{$overall_stat{'peak'}}) { $session_peak = &comma_numbers($overall_stat{'peak'}{$_}{session}); $session_peak_date = $_ if ($session_peak); last; } print $fh qq{

Simultaneous sessions

Key values

  • $session_peak sessions Session Peak
  • $session_peak_date Date
$drawn_graphs{sessionspersecond_graph}
}; delete $drawn_graphs{sessionspersecond_graph}; } sub print_simultaneous_pgb_session { my $session_peak = 0; my $session_peak_date = ''; foreach (sort {$pgb_overall_stat{'peak'}{$b}{session} <=> $pgb_overall_stat{'peak'}{$a}{session}} keys %{$pgb_overall_stat{'peak'}}) { $session_peak = &comma_numbers($pgb_overall_stat{'peak'}{$_}{session}); $session_peak_date = $_; last; } print $fh qq{

Simultaneous sessions

Key values

  • $session_peak sessions Session Peak
  • $session_peak_date Date
$drawn_graphs{pgb_sessionspersecond_graph}
}; delete $drawn_graphs{pgb_sessionspersecond_graph}; } sub print_histogram_session_times { my %data = (); my $histogram_info = ''; my $most_range = ''; my $most_range_value = ''; for (my $i = 1; $i <= $#histogram_session_time; $i++) { $histogram_info .= "" . &convert_time($histogram_session_time[$i-1]) . '-' . &convert_time($histogram_session_time[$i]) . "" . &comma_numbers($overall_stat{histogram}{session_time}{$histogram_session_time[$i-1]}) . "" . sprintf("%0.2f", ($overall_stat{histogram}{session_time}{$histogram_session_time[$i-1]} * 100) / ($overall_stat{histogram}{session_total}||1)) . "%"; $data{"$histogram_session_time[$i-1]-$histogram_session_time[$i]ms"} = ($overall_stat{histogram}{session_time}{$histogram_session_time[$i-1]} || 0); if ($overall_stat{histogram}{session_time}{$histogram_session_time[$i-1]} > $most_range_value) { $most_range = "$histogram_session_time[$i-1]-$histogram_session_time[$i]ms"; $most_range_value = $overall_stat{histogram}{session_time}{$histogram_session_time[$i-1]}; } } if ($overall_stat{histogram}{session_total} > 0) { $histogram_info .= " > " . &convert_time($histogram_session_time[-1]) . "" . &comma_numbers($overall_stat{histogram}{session_time}{'-1'}) . "" . sprintf("%0.2f", ($overall_stat{histogram}{session_time}{'-1'} * 100) / ($overall_stat{histogram}{session_total}||1)) . "%"; $data{"> $histogram_session_time[-1]ms"} = ($overall_stat{histogram}{session_time}{"-1"} || 0); if ($overall_stat{histogram}{session_time}{"-1"} > $most_range_value) { $most_range = "> $histogram_session_time[-1]ms"; $most_range_value = $overall_stat{histogram}{session_time}{"-1"}; } $drawn_graphs{histogram_session_times_graph} = &jqplot_duration_histograph($graphid++, 'graph_histogram_session_times', 'Sessions', \@histogram_session_time, %data); } else { $histogram_info = qq{$NODATA}; $drawn_graphs{histogram_session_times_graph} = qq{$NODATA}; } $most_range_value = &comma_numbers($most_range_value) if ($most_range_value); print $fh qq{

Histogram of session times

Key values

  • $most_range_value $most_range duration
$drawn_graphs{histogram_session_times_graph}
$histogram_info
Range Count Percentage
}; delete $drawn_graphs{histogram_session_times_graph}; } sub print_histogram_pgb_session_times { my %data = (); my $histogram_info = ''; my $most_range = ''; my $most_range_value = ''; for (my $i = 1; $i <= $#histogram_session_time; $i++) { $histogram_info .= "" . &convert_time($histogram_session_time[$i-1]) . '-' . &convert_time($histogram_session_time[$i]) . "" . &comma_numbers($pgb_overall_stat{histogram}{session_time}{$histogram_session_time[$i-1]}) . "" . sprintf("%0.2f", ($pgb_overall_stat{histogram}{session_time}{$histogram_session_time[$i-1]} * 100) / ($pgb_overall_stat{histogram}{session_total}||1)) . "%"; $data{"$histogram_session_time[$i-1]-$histogram_session_time[$i]ms"} = ($pgb_overall_stat{histogram}{session_time}{$histogram_session_time[$i-1]} || 0); if ($pgb_overall_stat{histogram}{session_time}{$histogram_session_time[$i-1]} > $most_range_value) { $most_range = "$histogram_session_time[$i-1]-$histogram_session_time[$i]ms"; $most_range_value = $pgb_overall_stat{histogram}{session_time}{$histogram_session_time[$i-1]}; } } if ($pgb_overall_stat{histogram}{session_total} > 0) { $histogram_info .= " > " . &convert_time($histogram_session_time[-1]) . "" . &comma_numbers($pgb_overall_stat{histogram}{session_time}{'-1'}) . "" . sprintf("%0.2f", ($pgb_overall_stat{histogram}{session_time}{'-1'} * 100) / ($pgb_overall_stat{histogram}{session_total}||1)) . "%"; $data{"> $histogram_session_time[-1]ms"} = ($pgb_overall_stat{histogram}{session_time}{"-1"} || 0); if ($pgb_overall_stat{histogram}{session_time}{"-1"} > $most_range_value) { $most_range = "> $histogram_session_time[-1]ms"; $most_range_value = $pgb_overall_stat{histogram}{session_time}{"-1"}; } $drawn_graphs{pgb_histogram_session_times_graph} = &jqplot_duration_histograph($graphid++, 'graph_pgb_histogram_session_times', 'Sessions', \@histogram_session_time, %data); } else { $histogram_info = qq{$NODATA}; $drawn_graphs{pgb_histogram_session_times_graph} = qq{$NODATA}; } $most_range_value = &comma_numbers($most_range_value) if ($most_range_value); print $fh qq{

Histogram of session times

Key values

  • $most_range_value $most_range duration
$drawn_graphs{pgb_histogram_session_times_graph}
$histogram_info
Range Count Percentage
}; delete $drawn_graphs{pgb_histogram_session_times_graph}; } sub print_user_session { my %infos = (); my $total_count = 0; my $c = 0; my $sess_user_info = ''; my @main_user = ('unknown',0); foreach my $u (sort keys %{$session_info{user}}) { $sess_user_info .= "$u" . &comma_numbers($session_info{user}{$u}{count}) . "" . &convert_time($session_info{user}{$u}{duration}), "" . &convert_time($session_info{user}{$u}{duration} / $session_info{user}{$u}{count}) . ""; $total_count += $session_info{user}{$u}{count}; if ($main_user[1] < $session_info{user}{$u}{count}) { $main_user[0] = $u; $main_user[1] = $session_info{user}{$u}{count}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$session_info{user}}) { if ((($session_info{user}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $session_info{user}{$d}{count} || 0; } else { $infos{"Sum sessions < $pie_percentage_limit%"} += $session_info{user}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; delete $infos{"Sum sessions < $pie_percentage_limit%"}; } } $drawn_graphs{usersessions_graph} = &jqplot_piegraph($graphid++, 'graph_usersessions', 'Sessions per user', %infos); $sess_user_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); print $fh qq{

Sessions per user

Key values

  • $main_user[0] Main User
  • $total_count sessions Total
$drawn_graphs{usersessions_graph}
$sess_user_info
User Count Total Duration Average Duration
}; delete $drawn_graphs{usersessions_graph}; } sub print_user_pgb_session { my %infos = (); my $total_count = 0; my $c = 0; my $sess_user_info = ''; my @main_user = ('unknown',0); foreach my $u (sort keys %{$pgb_session_info{user}}) { $sess_user_info .= "$u" . &comma_numbers($pgb_session_info{user}{$u}{count}) . "" . &convert_time($pgb_session_info{user}{$u}{duration}), "" . &convert_time($pgb_session_info{user}{$u}{duration} / $pgb_session_info{user}{$u}{count}) . ""; $total_count += $pgb_session_info{user}{$u}{count}; if ($main_user[1] < $pgb_session_info{user}{$u}{count}) { $main_user[0] = $u; $main_user[1] = $pgb_session_info{user}{$u}{count}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$pgb_session_info{user}}) { if ((($pgb_session_info{user}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $pgb_session_info{user}{$d}{count} || 0; } else { $infos{"Sum sessions < $pie_percentage_limit%"} += $pgb_session_info{user}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; delete $infos{"Sum sessions < $pie_percentage_limit%"}; } } $drawn_graphs{pgb_usersessions_graph} = &jqplot_piegraph($graphid++, 'graph_pgb_usersessions', 'Sessions per user', %infos); $sess_user_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); print $fh qq{

Sessions per user

Key values

  • $main_user[0] Main User
  • $total_count sessions Total
$drawn_graphs{pgb_usersessions_graph}
$sess_user_info
User Count Total Duration Average Duration
}; delete $drawn_graphs{pgb_usersessions_graph}; } sub print_host_session { my %infos = (); my $total_count = 0; my $c = 0; my $sess_host_info = ''; my @main_host = ('unknown',0); foreach my $h (sort keys %{$session_info{host}}) { $sess_host_info .= "$h" . &comma_numbers($session_info{host}{$h}{count}) . "" . &convert_time($session_info{host}{$h}{duration}) . "" . &convert_time($session_info{host}{$h}{duration} / $session_info{host}{$h}{count}) . ""; $total_count += $session_info{host}{$h}{count}; if ($main_host[1] < $session_info{host}{$h}{count}) { $main_host[0] = $h; $main_host[1] = $session_info{host}{$h}{count}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$session_info{host}}) { if ((($session_info{host}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $session_info{host}{$d}{count} || 0; } else { $infos{"Sum sessions < $pie_percentage_limit%"} += $session_info{host}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; delete $infos{"Sum sessions < $pie_percentage_limit%"}; } } $drawn_graphs{hostsessions_graph} = &jqplot_piegraph($graphid++, 'graph_hostsessions', 'Connections per host', %infos); $sess_host_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); print $fh qq{

Sessions per host

Key values

  • $main_host[0] Main Host
  • $total_count sessions Total
$drawn_graphs{hostsessions_graph}
$sess_host_info
Host Count Total Duration Average Duration
}; delete $drawn_graphs{hostsessions_graph}; } sub print_host_pgb_session { my %infos = (); my $total_count = 0; my $c = 0; my $sess_host_info = ''; my @main_host = ('unknown',0); foreach my $h (sort keys %{$pgb_session_info{host}}) { $sess_host_info .= "$h" . &comma_numbers($pgb_session_info{host}{$h}{count}) . "" . &convert_time($pgb_session_info{host}{$h}{duration}) . "" . &convert_time($pgb_session_info{host}{$h}{duration} / $pgb_session_info{host}{$h}{count}) . ""; $total_count += $pgb_session_info{host}{$h}{count}; if ($main_host[1] < $pgb_session_info{host}{$h}{count}) { $main_host[0] = $h; $main_host[1] = $pgb_session_info{host}{$h}{count}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$pgb_session_info{host}}) { if ((($pgb_session_info{host}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $pgb_session_info{host}{$d}{count} || 0; } else { $infos{"Sum sessions < $pie_percentage_limit%"} += $pgb_session_info{host}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; delete $infos{"Sum sessions < $pie_percentage_limit%"}; } } $drawn_graphs{pgb_hostsessions_graph} = &jqplot_piegraph($graphid++, 'graph_pgb_hostsessions', 'Sessions per host', %infos); $sess_host_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); print $fh qq{

Sessions per host

Key values

  • $main_host[0] Main Host
  • $total_count sessions Total
$drawn_graphs{pgb_hostsessions_graph}
$sess_host_info
Host Count Total Duration Average Duration
}; delete $drawn_graphs{pgb_hostsessions_graph}; } sub print_app_session { my %infos = (); my $total_count = 0; my $c = 0; my $sess_app_info = ''; my @main_app = ('unknown',0); foreach my $h (sort keys %{$session_info{app}}) { $sess_app_info .= "$h" . &comma_numbers($session_info{app}{$h}{count}) . "" . &convert_time($session_info{app}{$h}{duration}) . "" . &convert_time($session_info{app}{$h}{duration} / $session_info{app}{$h}{count}) . ""; $total_count += $session_info{app}{$h}{count}; if ($main_app[1] < $session_info{app}{$h}{count}) { $main_app[0] = $h; $main_app[1] = $session_info{app}{$h}{count}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$session_info{app}}) { if ((($session_info{app}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $session_info{app}{$d}{count} || 0; } else { $infos{"Sum sessions < $pie_percentage_limit%"} += $session_info{app}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; delete $infos{"Sum sessions < $pie_percentage_limit%"}; } } $drawn_graphs{appsessions_graph} = &jqplot_piegraph($graphid++, 'graph_appsessions', 'Sessions per application', %infos); $sess_app_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); print $fh qq{

Sessions per application

Key values

  • $main_app[0] Main Application
  • $total_count sessions Total
$drawn_graphs{appsessions_graph}
$sess_app_info
Application Count Total Duration Average Duration
}; delete $drawn_graphs{appsessions_graph}; } sub print_database_session { my %infos = (); my $total_count = 0; my $sess_database_info = ''; my @main_database = ('unknown',0); foreach my $d (sort keys %{$session_info{database}}) { $sess_database_info .= "$d" . &comma_numbers($session_info{database}{$d}{count}) . "" . &convert_time($session_info{database}{$d}{duration}) . "" . &convert_time($session_info{database}{$d}{duration} / $session_info{database}{$d}{count}) . ""; $total_count += $session_info{database}{$d}{count}; if ($main_database[1] < $session_info{database}{$d}{count}) { $main_database[0] = $d; $main_database[1] = $session_info{database}{$d}{count}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$session_info{database}}) { if ((($session_info{database}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $session_info{database}{$d}{count} || 0; } else { $infos{"Sum sessions < $pie_percentage_limit%"} += $session_info{database}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; delete $infos{"Sum sessions < $pie_percentage_limit%"}; } } $drawn_graphs{databasesessions_graph} = &jqplot_piegraph($graphid++, 'graph_databasesessions', 'Sessions per database', %infos); $sess_database_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); print $fh qq{

Sessions per database

Key values

  • $main_database[0] Main Database
  • $total_count sessions Total
$drawn_graphs{databasesessions_graph}
$sess_database_info
Database User Count Total Duration Average Duration
}; delete $drawn_graphs{databasesessions_graph}; } sub print_database_pgb_session { my %infos = (); my $total_count = 0; my $sess_database_info = ''; my @main_database = ('unknown',0); foreach my $d (sort keys %{$pgb_session_info{database}}) { $sess_database_info .= "$d" . &comma_numbers($pgb_session_info{database}{$d}{count}) . "" . &convert_time($pgb_session_info{database}{$d}{duration}) . "" . &convert_time($pgb_session_info{database}{$d}{duration} / $pgb_session_info{database}{$d}{count}) . ""; $total_count += $pgb_session_info{database}{$d}{count}; if ($main_database[1] < $pgb_session_info{database}{$d}{count}) { $main_database[0] = $d; $main_database[1] = $pgb_session_info{database}{$d}{count}; } } if ($graph) { my @small = (); foreach my $d (sort keys %{$pgb_session_info{database}}) { if ((($pgb_session_info{database}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $pgb_session_info{database}{$d}{count} || 0; } else { $infos{"Sum sessions < $pie_percentage_limit%"} += $pgb_session_info{database}{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; delete $infos{"Sum sessions < $pie_percentage_limit%"}; } } $drawn_graphs{pgb_databasesessions_graph} = &jqplot_piegraph($graphid++, 'graph_pgb_databasesessions', 'Sessions per database', %infos); $sess_database_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); print $fh qq{

Sessions per database

Key values

  • $main_database[0] Main Database
  • $total_count sessions Total
$drawn_graphs{pgb_databasesessions_graph}
$sess_database_info
Database User Count Total Duration Average Duration
}; delete $drawn_graphs{pgb_databasesessions_graph}; } sub print_checkpoint { # checkpoint my %graph_data = (); if ($graph) { foreach my $tm (sort {$a <=> $b} keys %per_minute_info) { $tm =~ /(\d{4})(\d{2})(\d{2})/; my $y = $1 - 1900; my $mo = $2 - 1; my $d = $3; foreach my $h ("00" .. "23") { next if (!exists $per_minute_info{$tm}{$h}); my %chk_dataavg = (); my %t_dataavg = (); my %v_dataavg = (); foreach my $m ("00" .. "59") { next if (!exists $per_minute_info{$tm}{$h}{$m}); my $rd = &average_per_minutes($m, $avg_minutes); if ($checkpoint_info{wbuffer}) { $chk_dataavg{wbuffer}{"$rd"} = 0 if (!exists $chk_dataavg{wbuffer}{"$rd"}); $chk_dataavg{file_added}{"$rd"} = 0 if (!exists $chk_dataavg{file_added}{"$rd"}); $chk_dataavg{file_removed}{"$rd"} = 0 if (!exists $chk_dataavg{file_removed}{"$rd"}); $chk_dataavg{file_recycled}{"$rd"} = 0 if (!exists $chk_dataavg{file_recycled}{"$rd"}); if (exists $per_minute_info{$tm}{$h}{$m}{checkpoint}) { $chk_dataavg{wbuffer}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{checkpoint}{wbuffer} || 0); $chk_dataavg{file_added}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{checkpoint}{file_added} || 0); $chk_dataavg{file_removed}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{checkpoint}{file_removed} || 0); $chk_dataavg{file_recycled}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{checkpoint}{file_recycled} || 0); } } if (exists $checkpoint_info{distance} || exists $checkpoint_info{estimate}) { $chk_dataavg{distance}{"$rd"} = 0 if (!exists $chk_dataavg{distance}{"$rd"}); $chk_dataavg{estimate}{"$rd"} = 0 if (!exists $chk_dataavg{estimate}{"$rd"}); if (exists $per_minute_info{$tm}{$h}{$m}{checkpoint}) { $chk_dataavg{distance}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{checkpoint}{distance} || 0) * 1000; $chk_dataavg{distance_count}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{checkpoint}{distance_count} || 1); $chk_dataavg{estimate}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{checkpoint}{estimate} || 0) * 1000; } } } foreach my $rd (@avgs) { my $t = timegm_nocheck(0, $rd, $h, $d, $mo, $y) * 1000; $t += ($timezone*1000); next if ($t < $t_min); last if ($t > $t_max); # Average of written checkpoint buffers and wal files if (exists $chk_dataavg{wbuffer}) { $graph_data{wbuffer} .= "[$t, " . ($chk_dataavg{wbuffer}{"$rd"} || 0) . "],"; $graph_data{file_added} .= "[$t, " . ($chk_dataavg{file_added}{"$rd"} || 0) . "],"; $graph_data{file_removed} .= "[$t, " . ($chk_dataavg{file_removed}{"$rd"} || 0) . "],"; $graph_data{file_recycled} .= "[$t, " . ($chk_dataavg{file_recycled}{"$rd"} || 0) . "],"; } if (exists $chk_dataavg{distance} || $chk_dataavg{estimate}) { $graph_data{distance} .= "[$t, " . int(($chk_dataavg{distance}{"$rd"}/($chk_dataavg{distance_count}{"$rd"} || 1)) || 0) . "],"; $graph_data{estimate} .= "[$t, " . int(($chk_dataavg{estimate}{"$rd"}/($chk_dataavg{distance_count}{"$rd"} || 1)) || 0) . "],"; } } } } foreach (keys %graph_data) { $graph_data{$_} =~ s/,$//; } } # Checkpoint buffers and files $drawn_graphs{checkpointwritebuffers_graph} = &jqplot_linegraph($graphid++, 'checkpointwritebuffers_graph', $graph_data{wbuffer}, '', '', 'Checkpoint write buffers (' . $avg_minutes . ' minutes period)', 'Buffers', 'Write buffers', '', '' ); $drawn_graphs{checkpointfiles_graph} = &jqplot_linegraph($graphid++, 'checkpointfiles_graph', $graph_data{file_added}, $graph_data{file_removed}, $graph_data{file_recycled}, 'Checkpoint Wal files usage (' . $avg_minutes . 
' minutes period)', 'Number of files', 'Added', 'Removed', 'Recycled' ); $drawn_graphs{checkpointdistance_graph} = &jqplot_linegraph($graphid++, 'checkpointdistance_graph', $graph_data{distance}, $graph_data{estimate}, '', 'Checkpoint mean distance and estimate (' . $avg_minutes . ' minutes period)', 'Number of bytes', 'distance', 'estimate' ); my $checkpoint_wbuffer_peak = 0; my $checkpoint_wbuffer_peak_date = ''; foreach (sort {$overall_checkpoint{'peak'}{$b}{checkpoint_wbuffer} <=> $overall_checkpoint{'peak'}{$a}{checkpoint_wbuffer}} keys %{$overall_checkpoint{'peak'}}) { $checkpoint_wbuffer_peak = &comma_numbers($overall_checkpoint{'peak'}{$_}{checkpoint_wbuffer}); $checkpoint_wbuffer_peak_date = $_; last; } my $walfile_usage_peak = 0; my $walfile_usage_peak_date = ''; foreach (sort {$overall_checkpoint{'peak'}{$b}{walfile_usage} <=> $overall_checkpoint{'peak'}{$a}{walfile_usage}} keys %{$overall_checkpoint{'peak'}}) { $walfile_usage_peak = &comma_numbers($overall_checkpoint{'peak'}{$_}{walfile_usage}); $walfile_usage_peak_date = $_; last; } print $fh qq{

Checkpoints / Restartpoints

Checkpoints Buffers

Key values

  • $checkpoint_wbuffer_peak buffers Checkpoint Peak
  • $checkpoint_wbuffer_peak_date Date
  • $overall_checkpoint{checkpoint_write} seconds Highest write time
  • $overall_checkpoint{checkpoint_sync} seconds Sync time
$drawn_graphs{checkpointwritebuffers_graph}
}; delete $drawn_graphs{checkpointwritebuffers_graph}; print $fh qq{

Checkpoints Wal files

Key values

  • $walfile_usage_peak files Wal files usage Peak
  • $walfile_usage_peak_date Date
$drawn_graphs{checkpointfiles_graph}
}; delete $drawn_graphs{checkpointfiles_graph}; print $fh qq{

Checkpoints distance

Key values

$drawn_graphs{checkpointdistance_graph}
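# A minimal, hypothetical sketch of the averaging behind the distance/estimate series
# plotted above: the log reports these values in kB, the graph code scales them by 1000
# and divides by the number of checkpoints seen in the interval, defaulting the divisor
# to 1 so an interval without a recorded count cannot divide by zero. The helper name
# is illustrative, not taken from pgBadger.
sub mean_checkpoint_distance
{
	my ($distance_kb_sum, $checkpoint_count) = @_;

	# Same scaling and zero-division guard as the graph code above.
	return int((($distance_kb_sum * 1000) / ($checkpoint_count || 1)) || 0);
}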
}; delete $drawn_graphs{checkpointdistance_graph}; my $buffers = ''; my $files = ''; my $warnings = ''; my $distance = ''; foreach my $d (sort {$a <=> $b} keys %per_minute_info) { $d =~ /^\d{4}(\d{2})(\d{2})$/; my $zday = "$abbr_month{$1} $2"; foreach my $h (sort {$a <=> $b} keys %{$per_minute_info{$d}}) { $buffers .= "$zday$h"; $files .= "$zday$h"; $warnings .= "$zday$h"; $distance .= "$zday$h"; $zday = ''; my %cinf = (); my %rinf = (); my %cainf = (); my %rainf = (); my %dinf = (); foreach my $m (keys %{$per_minute_info{$d}{$h}}) { if (exists $per_minute_info{$d}{$h}{$m}{checkpoint}) { $cinf{wbuffer} += $per_minute_info{$d}{$h}{$m}{checkpoint}{wbuffer}; $cinf{file_added} += $per_minute_info{$d}{$h}{$m}{checkpoint}{file_added}; $cinf{file_removed} += $per_minute_info{$d}{$h}{$m}{checkpoint}{file_removed}; $cinf{file_recycled} += $per_minute_info{$d}{$h}{$m}{checkpoint}{file_recycled}; $cinf{write} += $per_minute_info{$d}{$h}{$m}{checkpoint}{write}; $cinf{sync} += $per_minute_info{$d}{$h}{$m}{checkpoint}{sync}; $cinf{total} += $per_minute_info{$d}{$h}{$m}{checkpoint}{total}; $cainf{sync_files} += $per_minute_info{$d}{$h}{$m}{checkpoint}{sync_files}; $cainf{sync_avg} += $per_minute_info{$d}{$h}{$m}{checkpoint}{sync_avg}; $cainf{sync_longest} = $per_minute_info{$d}{$h}{$m}{checkpoint}{sync_longest} if ($per_minute_info{$d}{$h}{$m}{checkpoint}{sync_longest} > $cainf{sync_longest}); } if (exists $per_minute_info{$d}{$h}{$m}{checkpoint}{warning}) { $cinf{warning} += $per_minute_info{$d}{$h}{$m}{checkpoint}{warning}; $cinf{warning_seconds} += $per_minute_info{$d}{$h}{$m}{checkpoint}{warning_seconds}; } if (exists $per_minute_info{$d}{$h}{$m}{checkpoint}{distance} || $per_minute_info{$d}{$h}{$m}{checkpoint}{estimate}) { $dinf{distance}{sum} += $per_minute_info{$d}{$h}{$m}{checkpoint}{distance}; $dinf{estimate}{sum} += $per_minute_info{$d}{$h}{$m}{checkpoint}{estimate}; $dinf{distance}{count} += $per_minute_info{$d}{$h}{$m}{checkpoint}{distance_count}; } } if (scalar keys %cinf) { $buffers .= "" . &comma_numbers($cinf{wbuffer}) . "" . &comma_numbers($cinf{write}) . 's' . "" . &comma_numbers($cinf{sync}) . 's' . "" . &comma_numbers($cinf{total}) . 's' . ""; $files .= "" . &comma_numbers($cinf{file_added}) . "" . &comma_numbers($cinf{file_removed}) . "" . &comma_numbers($cinf{file_recycled}) . "" . &comma_numbers($cainf{sync_files}) . "" . &comma_numbers($cainf{sync_longest}) . 's' . "" . &comma_numbers($cainf{sync_avg}) . 's' . ""; } else { $buffers .= "00s0s0s"; $files .= "00000s0s"; } if (exists $cinf{warning}) { $warnings .= "" . &comma_numbers($cinf{warning}) . "" . &comma_numbers(sprintf( "%.2f", ($cinf{warning_seconds} || 0) / ($cinf{warning} || 1))) . "s"; } else { $warnings .= "00s"; } if (exists $dinf{distance} || $dinf{estimate}) { $distance .= "" . &comma_numbers(sprintf( "%.2f", $dinf{distance}{sum}/$dinf{distance}{count})) . " kB" . &comma_numbers(sprintf( "%.2f", $dinf{estimate}{sum}/$dinf{distance}{count})) . " kB"; } else { $distance .= "00"; } } } $buffers = qq{$NODATA} if (!$buffers); $files = qq{$NODATA} if (!$files); $warnings = qq{$NODATA} if (!$warnings); $distance = qq{$NODATA} if (!$distance); print $fh qq{

Checkpoints Activity

$buffers
Day Hour Written buffers Write time Sync time Total time
$files
Day Hour Added Removed Recycled Synced files Longest sync Average sync
$warnings
Day Hour Count Avg time (sec)
$distance
Day Hour Mean distance Mean estimate
Back to the top of the Checkpoint Activity table
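# Sketch, under the assumed %per_minute_info layout used above
# ($per_minute_info{YYYYMMDD}{HH}{MM}{checkpoint}{...}), of how one hourly row of the
# activity table is built: every per-minute counter is simply summed per day/hour.
# The helper name and the reduced set of keys are illustrative only.
sub hourly_checkpoint_totals
{
	my ($per_minute) = @_;    # hashref with the same nesting as %per_minute_info

	my %hourly = ();
	foreach my $day (keys %{$per_minute}) {
		foreach my $hour (keys %{$per_minute->{$day}}) {
			foreach my $min (keys %{$per_minute->{$day}{$hour}}) {
				my $chk = $per_minute->{$day}{$hour}{$min}{checkpoint} or next;
				$hourly{$day}{$hour}{wbuffer} += ($chk->{wbuffer} || 0);
				$hourly{$day}{$hour}{write}   += ($chk->{write}   || 0);
				$hourly{$day}{$hour}{sync}    += ($chk->{sync}    || 0);
			}
		}
	}

	return \%hourly;
}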
}; } sub print_temporary_file { # checkpoint my %graph_data = (); if ($graph) { foreach my $tm (sort {$a <=> $b} keys %per_minute_info) { $tm =~ /(\d{4})(\d{2})(\d{2})/; my $y = $1 - 1900; my $mo = $2 - 1; my $d = $3; foreach my $h ("00" .. "23") { next if (!exists $per_minute_info{$tm}{$h}); my %chk_dataavg = (); my %t_dataavg = (); my %v_dataavg = (); foreach my $m ("00" .. "59") { next if (!exists $per_minute_info{$tm}{$h}{$m}); my $rd = &average_per_minutes($m, $avg_minutes); if ($tempfile_info{count}) { $t_dataavg{size}{"$rd"} = 0 if (!exists $t_dataavg{size}{"$rd"}); $t_dataavg{count}{"$rd"} = 0 if (!exists $t_dataavg{count}{"$rd"}); if (exists $per_minute_info{$tm}{$h}{$m}{tempfile}) { $t_dataavg{size}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{tempfile}{size} || 0); $t_dataavg{count}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{tempfile}{count} || 0); } } } foreach my $rd (@avgs) { my $t = timegm_nocheck(0, $rd, $h, $d, $mo, $y) * 1000; $t += ($timezone*1000); next if ($t < $t_min); last if ($t > $t_max); if (exists $t_dataavg{size}) { $graph_data{size} .= "[$t, " . ($t_dataavg{size}{"$rd"} || 0) . "],"; $graph_data{count} .= "[$t, " . ($t_dataavg{count}{"$rd"} || 0) . "],"; } } } } foreach (keys %graph_data) { $graph_data{$_} =~ s/,$//; } } # Temporary file size $drawn_graphs{temporarydata_graph} = &jqplot_linegraph($graphid++, 'temporarydata_graph', $graph_data{size}, '', '', 'Size of temporary files (' . $avg_minutes . ' minutes period)', 'Size of files', 'Size of files' ); # Temporary file number $drawn_graphs{temporaryfile_graph} = &jqplot_linegraph($graphid++, 'temporaryfile_graph', $graph_data{count}, '', '', 'Number of temporary files (' . $avg_minutes . ' minutes period)', 'Number of files', 'Number of files' ); my $tempfile_size_peak = 0; my $tempfile_size_peak_date = ''; foreach (sort {$overall_stat{'peak'}{$b}{tempfile_size} <=> $overall_stat{'peak'}{$a}{tempfile_size}} keys %{$overall_stat{'peak'}}) { $tempfile_size_peak = &pretty_print_size($overall_stat{'peak'}{$_}{tempfile_size}); $tempfile_size_peak_date = $_ if ($tempfile_size_peak); last; } print $fh qq{

Temporary Files

Size of temporary files

Key values

  • $tempfile_size_peak Temp Files size Peak
  • $tempfile_size_peak_date Date
$drawn_graphs{temporarydata_graph}
}; delete $drawn_graphs{temporarydata_graph}; my $tempfile_count_peak = 0; my $tempfile_count_peak_date = ''; foreach (sort {$overall_stat{'peak'}{$b}{tempfile_count} <=> $overall_stat{'peak'}{$a}{tempfile_count}} keys %{$overall_stat{'peak'}}) { $tempfile_count_peak = &comma_numbers($overall_stat{'peak'}{$_}{tempfile_count}); $tempfile_count_peak_date = $_ if ($tempfile_count_peak); last; } print $fh qq{

Number of temporary files

Key values

  • $tempfile_count_peak per second Temp Files Peak
  • $tempfile_count_peak_date Date
$drawn_graphs{temporaryfile_graph}
}; delete $drawn_graphs{temporaryfile_graph}; my $tempfiles_activity = ''; foreach my $d (sort {$a <=> $b} keys %per_minute_info) { $d =~ /^\d{4}(\d{2})(\d{2})$/; my $zday = "$abbr_month{$1} $2"; foreach my $h (sort {$a <=> $b} keys %{$per_minute_info{$d}}) { $tempfiles_activity .= "$zday$h"; $zday = ""; my %tinf = (); foreach my $m (keys %{$per_minute_info{$d}{$h}}) { if (exists $per_minute_info{$d}{$h}{$m}{tempfile}) { $tinf{size} += $per_minute_info{$d}{$h}{$m}{tempfile}{size}; $tinf{count} += $per_minute_info{$d}{$h}{$m}{tempfile}{count}; } } if (scalar keys %tinf) { my $temp_average = &pretty_print_size(sprintf("%.2f", $tinf{size} / $tinf{count})); $tempfiles_activity .= "" . &comma_numbers($tinf{count}) . "" . &pretty_print_size($tinf{size}) . "" . "$temp_average"; } else { $tempfiles_activity .= "00"; } } } $tempfiles_activity = qq{$NODATA} if (!$tempfiles_activity); print $fh qq{

Temporary Files Activity

$tempfiles_activity
Day Hour Count Total size Average size
Back to the top of the Temporary Files Activity table
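# The "Average size" column above is the hourly size sum divided by the hourly count,
# then passed through &pretty_print_size. The formatter below is a hypothetical
# stand-in (not pgBadger's pretty_print_size) showing the general idea of turning a
# byte count into a human-readable figure.
sub human_size
{
	my ($bytes) = @_;

	my @units = ('B', 'KiB', 'MiB', 'GiB', 'TiB');
	my $i = 0;
	while (($bytes >= 1024) && ($i < $#units)) {
		$bytes /= 1024;
		$i++;
	}

	return sprintf("%.2f %s", $bytes, $units[$i]);
}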
}; } sub print_cancelled_queries { # checkpoint my %graph_data = (); if ($graph) { foreach my $tm (sort {$a <=> $b} keys %per_minute_info) { $tm =~ /(\d{4})(\d{2})(\d{2})/; my $y = $1 - 1900; my $mo = $2 - 1; my $d = $3; foreach my $h ("00" .. "23") { next if (!exists $per_minute_info{$tm}{$h}); my %chk_dataavg = (); my %t_dataavg = (); my %v_dataavg = (); foreach my $m ("00" .. "59") { next if (!exists $per_minute_info{$tm}{$h}{$m}); my $rd = &average_per_minutes($m, $avg_minutes); if ($cancelled_info{count}) { $t_dataavg{count}{"$rd"} = 0 if (!exists $t_dataavg{count}{"$rd"}); if (exists $per_minute_info{$tm}{$h}{$m}{cancelled}) { $t_dataavg{count}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{cancelled}{count} || 0); } } } foreach my $rd (@avgs) { my $t = timegm_nocheck(0, $rd, $h, $d, $mo, $y) * 1000; $t += ($timezone*1000); next if ($t < $t_min); last if ($t > $t_max); if (exists $t_dataavg{count}) { $graph_data{count} .= "[$t, " . ($t_dataavg{count}{"$rd"} || 0) . "],"; } } } } foreach (keys %graph_data) { $graph_data{$_} =~ s/,$//; } } # Number of cancelled queries graph $drawn_graphs{cancelledqueries_graph} = &jqplot_linegraph($graphid++, 'cancelledqueries_graph', $graph_data{count}, '', '', 'Number of cancelled queries (' . $avg_minutes . ' minutes period)', 'Number of cancellation', 'Number of cancellation' ); my $cancelled_count_peak = 0; my $cancelled_count_peak_date = ''; foreach (sort {$overall_stat{'peak'}{$b}{cancelled_count} <=> $overall_stat{'peak'}{$a}{cancelled_count}} keys %{$overall_stat{'peak'}}) { $cancelled_count_peak = &comma_numbers($overall_stat{'peak'}{$_}{cancelled_count}); $cancelled_count_peak_date = $_; last; } print $fh qq{

Number of cancelled queries

Key values

  • $cancelled_count_peak per second Cancelled query Peak
  • $cancelled_count_peak_date Date
$drawn_graphs{cancelledqueries_graph}
}; delete $drawn_graphs{cancelledqueries_graph}; } sub print_analyze_per_table { # ANALYZE stats per table my %infos = (); my $total_count = 0; my $analyze_info = ''; my @main_analyze = ('unknown',0); foreach my $t (sort {$autoanalyze_info{tables}{$b}{analyzes} <=> $autoanalyze_info{tables}{$a}{analyzes}} keys %{$autoanalyze_info{tables}}) { $analyze_info .= "$t" . $autoanalyze_info{tables}{$t}{analyzes} . ""; $total_count += $autoanalyze_info{tables}{$t}{analyzes}; if ($main_analyze[1] < $autoanalyze_info{tables}{$t}{analyzes}) { $main_analyze[0] = $t; $main_analyze[1] = $autoanalyze_info{tables}{$t}{analyzes}; } } $analyze_info .= "Total" . &comma_numbers($total_count) . ""; if ($graph) { my @small = (); foreach my $d (sort keys %{$autoanalyze_info{tables}}) { if ((($autoanalyze_info{tables}{$d}{analyzes} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $autoanalyze_info{tables}{$d}{analyzes} || 0; } else { $infos{"Sum analyzes < $pie_percentage_limit%"} += $autoanalyze_info{tables}{$d}{analyzes} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum analyzes < $pie_percentage_limit%"}; delete $infos{"Sum analyzes < $pie_percentage_limit%"}; } } $drawn_graphs{tableanalyzes_graph} = &jqplot_piegraph($graphid++, 'graph_tableanalyzes', 'Analyzes per tables', %infos); $total_count = &comma_numbers($total_count); my $database = ''; if ($main_analyze[0] =~ s/^([^\.]+)\.//) { $database = $1; } $analyze_info = qq{$NODATA} if (!$total_count); print $fh qq{

Analyzes per table

Key values

  • $main_analyze[0] ($main_analyze[1]) Main table analyzed (database $database)
  • $total_count analyzes Total
$drawn_graphs{tableanalyzes_graph}
$analyze_info
Table Number of analyzes
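# Hypothetical helper mirroring the "main table" bookkeeping used in this report:
# while walking a { name => count } hash, the pair with the highest count is kept and
# shown as the key value above. Names below are illustrative, not from pgBadger.
sub main_entry
{
	my (%count_by_name) = @_;

	my @main = ('unknown', 0);
	foreach my $name (keys %count_by_name) {
		if ($main[1] < $count_by_name{$name}) {
			@main = ($name, $count_by_name{$name});
		}
	}

	return @main;    # (name with the highest count, that count)
}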
}; delete $drawn_graphs{tableanalyzes_graph}; } sub print_vacuum { # checkpoint my %graph_data = (); foreach my $tm (sort {$a <=> $b} keys %per_minute_info) { $tm =~ /(\d{4})(\d{2})(\d{2})/; my $y = $1 - 1900; my $mo = $2 - 1; my $d = $3; foreach my $h ("00" .. "23") { next if (!exists $per_minute_info{$tm}{$h}); my %chk_dataavg = (); my %t_dataavg = (); my %v_dataavg = (); foreach my $m ("00" .. "59") { next if (!exists $per_minute_info{$tm}{$h}{$m}); my $rd = &average_per_minutes($m, $avg_minutes); $v_dataavg{acount}{"$rd"} = 0 if (!exists $v_dataavg{acount}{"$rd"}); $v_dataavg{vcount}{"$rd"} = 0 if (!exists $v_dataavg{vcount}{"$rd"}); if (exists $per_minute_info{$tm}{$h}{$m}{autovacuum}) { $v_dataavg{vcount}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{autovacuum}{count} || 0); } if (exists $per_minute_info{$tm}{$h}{$m}{autoanalyze}) { $v_dataavg{acount}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{autoanalyze}{count} || 0); } } foreach my $rd (@avgs) { my $t = timegm_nocheck(0, $rd, $h, $d, $mo, $y) * 1000; $t += ($timezone*1000); next if ($t < $t_min); last if ($t > $t_max); if (exists $v_dataavg{vcount}) { $graph_data{vcount} .= "[$t, " . ($v_dataavg{vcount}{"$rd"} || 0) . "],"; } if (exists $v_dataavg{acount}) { $graph_data{acount} .= "[$t, " . ($v_dataavg{acount}{"$rd"} || 0) . "],"; } } } } foreach (keys %graph_data) { $graph_data{$_} =~ s/,$//; } # VACUUMs vs ANALYZEs chart $drawn_graphs{autovacuum_graph} = &jqplot_linegraph($graphid++, 'autovacuum_graph', $graph_data{vcount}, $graph_data{acount}, '', 'Autovacuum actions (' . $avg_minutes . ' minutes period)', '', 'VACUUMs', 'ANALYZEs' ); my $vacuum_size_peak = 0; my $vacuum_size_peak_date = ''; foreach (sort {$overall_stat{'peak'}{$b}{vacuum_size} <=> $overall_stat{'peak'}{$a}{vacuum_size}} keys %{$overall_stat{'peak'}}) { $vacuum_size_peak = &comma_numbers($overall_stat{'peak'}{$_}{vacuum_size}); $vacuum_size_peak_date = $_; last; } my $autovacuum_peak_system_usage_db = ''; if ($autovacuum_info{peak}{system_usage}{table} =~ s/^([^\.]+)\.//) { $autovacuum_peak_system_usage_db = $1; } my $autoanalyze_peak_system_usage_db = ''; if ($autoanalyze_info{peak}{system_usage}{table} =~ s/^([^\.]+)\.//) { $autoanalyze_peak_system_usage_db = $1; } $autovacuum_info{peak}{system_usage}{elapsed} ||= 0; $autoanalyze_info{peak}{system_usage}{elapsed} ||= 0; print $fh qq{

Vacuums

Vacuums / Analyzes Distribution

Key values

  • $autovacuum_info{peak}{system_usage}{elapsed} sec Highest CPU-cost vacuum
    Table $autovacuum_info{peak}{system_usage}{table}
    Database $autovacuum_peak_system_usage_db
  • $autovacuum_info{peak}{system_usage}{date} Date
  • $autoanalyze_info{peak}{system_usage}{elapsed} sec Highest CPU-cost analyze
    Table $autoanalyze_info{peak}{system_usage}{table}
    Database $autoanalyze_peak_system_usage_db
  • $autoanalyze_info{peak}{system_usage}{date} Date
$drawn_graphs{autovacuum_graph}
}; delete $drawn_graphs{autovacuum_graph}; # ANALYZE stats per table &print_analyze_per_table(); # VACUUM stats per table &print_vacuum_per_table(); # Show tuples and pages removed per table &print_vacuum_tuple_removed; &print_vacuum_page_removed; my $vacuum_activity = ''; foreach my $d (sort {$a <=> $b} keys %per_minute_info) { my $c = 1; $d =~ /^\d{4}(\d{2})(\d{2})$/; my $zday = "$abbr_month{$1} $2"; foreach my $h (sort {$a <=> $b} keys %{$per_minute_info{$d}}) { $vacuum_activity .= "$zday$h"; $zday = ""; my %ainf = (); foreach my $m (keys %{$per_minute_info{$d}{$h}}) { if (exists $per_minute_info{$d}{$h}{$m}{autovacuum}{count}) { $ainf{vcount} += $per_minute_info{$d}{$h}{$m}{autovacuum}{count}; } if (exists $per_minute_info{$d}{$h}{$m}{autoanalyze}{count}) { $ainf{acount} += $per_minute_info{$d}{$h}{$m}{autoanalyze}{count}; } } if (scalar keys %ainf) { $vacuum_activity .= "" . &comma_numbers($ainf{vcount}) . ""; } else { $vacuum_activity .= "0"; } if (scalar keys %ainf) { $vacuum_activity .= "" . &comma_numbers($ainf{acount}) . ""; } else { $vacuum_activity .= "0"; } } } $vacuum_activity = qq{$NODATA} if (!$vacuum_activity); print $fh qq{

Autovacuum Activity

$vacuum_activity
Day Hour VACUUMs ANALYZEs
Back to the top of the Autovacuum Activity table
}; } sub print_vacuum_per_table { # VACUUM stats per table my $total_count = 0; my $total_idxscan = 0; my $vacuum_info = ''; my @main_vacuum = ('unknown',0); foreach my $t (sort {$autovacuum_info{tables}{$b}{vacuums} <=> $autovacuum_info{tables}{$a}{vacuums}} keys %{$autovacuum_info{tables}}) { $vacuum_info .= "$t" . $autovacuum_info{tables}{$t}{vacuums} . "" . $autovacuum_info{tables}{$t}{idxscans} . ""; $total_count += $autovacuum_info{tables}{$t}{vacuums}; $total_idxscan += $autovacuum_info{tables}{$t}{idxscans}; if ($main_vacuum[1] < $autovacuum_info{tables}{$t}{vacuums}) { $main_vacuum[0] = $t; $main_vacuum[1] = $autovacuum_info{tables}{$t}{vacuums}; } } $vacuum_info .= "Total" . &comma_numbers($total_count) . "" . &comma_numbers($total_idxscan) . ""; my %infos = (); my @small = (); foreach my $d (sort keys %{$autovacuum_info{tables}}) { if ((($autovacuum_info{tables}{$d}{vacuums} * 100) / ($total_count||1)) > $pie_percentage_limit) { $infos{$d} = $autovacuum_info{tables}{$d}{vacuums} || 0; } else { $infos{"Sum vacuums < $pie_percentage_limit%"} += $autovacuum_info{tables}{$d}{vacuums} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum vacuums < $pie_percentage_limit%"}; delete $infos{"Sum vacuums < $pie_percentage_limit%"}; } $drawn_graphs{tablevacuums_graph} = &jqplot_piegraph($graphid++, 'graph_tablevacuums', 'Vacuums per tables', %infos); $vacuum_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); my $database = ''; if ($main_vacuum[0] =~ s/^([^\.]+)\.//) { $database = $1; } print $fh qq{

Vacuums per table

Key values

  • $main_vacuum[0] ($main_vacuum[1]) Main table vacuumed on database $database
  • $total_count vacuums Total
$drawn_graphs{tablevacuums_graph}
$vacuum_info
Table Number of vacuums Index scans
}; delete $drawn_graphs{tablevacuums_graph}; } sub print_vacuum_tuple_removed { # VACUUM stats per table my $total_count = 0; my $total_idxscan = 0; my $total_tuple = 0; my $total_page = 0; my $vacuum_info = ''; my @main_tuple = ('unknown',0); foreach my $t (sort {$autovacuum_info{tables}{$b}{tuples}{removed} <=> $autovacuum_info{tables}{$a}{tuples}{removed}} keys %{$autovacuum_info{tables}}) { $vacuum_info .= "$t" . $autovacuum_info{tables}{$t}{vacuums} . "" . $autovacuum_info{tables}{$t}{idxscans} . "" . $autovacuum_info{tables}{$t}{tuples}{removed} . "" . $autovacuum_info{tables}{$t}{pages}{removed} . ""; $total_count += $autovacuum_info{tables}{$t}{vacuums}; $total_idxscan += $autovacuum_info{tables}{$t}{idxscans}; $total_tuple += $autovacuum_info{tables}{$t}{tuples}{removed}; $total_page += $autovacuum_info{tables}{$t}{pages}{removed}; if ($main_tuple[1] < $autovacuum_info{tables}{$t}{tuples}{removed}) { $main_tuple[0] = $t; $main_tuple[1] = $autovacuum_info{tables}{$t}{tuples}{removed}; } } $vacuum_info .= "Total" . &comma_numbers($total_count) . "" . &comma_numbers($total_idxscan) . "" . &comma_numbers($total_tuple) . "" . &comma_numbers($total_page) . ""; my %infos_tuple = (); my @small = (); foreach my $d (sort keys %{$autovacuum_info{tables}}) { if ((($autovacuum_info{tables}{$d}{tuples}{removed} * 100) / ($total_tuple||1)) > $pie_percentage_limit) { $infos_tuple{$d} = $autovacuum_info{tables}{$d}{tuples}{removed} || 0; } else { $infos_tuple{"Sum tuples removed < $pie_percentage_limit%"} += $autovacuum_info{tables}{$d}{tuples}{removed} || 0; push(@small, $d); } } if ($#small == 0) { $infos_tuple{$small[0]} = $infos_tuple{"Sum tuples removed < $pie_percentage_limit%"}; delete $infos_tuple{"Sum tuples removed < $pie_percentage_limit%"}; } $drawn_graphs{tuplevacuums_graph} = &jqplot_piegraph($graphid++, 'graph_tuplevacuums', 'Tuples removed per tables', %infos_tuple); $vacuum_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); my $database = ''; if ($main_tuple[0] =~ s/^([^\.]+)\.//) { $database = $1; } print $fh qq{

Tuples removed per table

Key values

  • $main_tuple[0] ($main_tuple[1]) Main table with removed tuples on database $database
  • $total_tuple tuples Total removed
$drawn_graphs{tuplevacuums_graph}
$vacuum_info
Table Number of vacuums Index scans Tuples removed Pages removed
}; delete $drawn_graphs{tuplevacuums_graph}; } sub print_vacuum_page_removed { # VACUUM stats per table my $total_count = 0; my $total_idxscan = 0; my $total_tuple = 0; my $total_page = 0; my $vacuum_info = ''; my @main_tuple = ('unknown',0); my @main_page = ('unknown',0); foreach my $t (sort {$autovacuum_info{tables}{$b}{pages}{removed} <=> $autovacuum_info{tables}{$a}{pages}{removed}} keys %{$autovacuum_info{tables}}) { $vacuum_info .= "$t" . $autovacuum_info{tables}{$t}{vacuums} . "" . $autovacuum_info{tables}{$t}{idxscans} . "" . $autovacuum_info{tables}{$t}{tuples}{removed} . "" . $autovacuum_info{tables}{$t}{pages}{removed} . ""; $total_count += $autovacuum_info{tables}{$t}{vacuums}; $total_idxscan += $autovacuum_info{tables}{$t}{idxscans}; $total_tuple += $autovacuum_info{tables}{$t}{tuples}{removed}; $total_page += $autovacuum_info{tables}{$t}{pages}{removed}; if ($main_page[1] < $autovacuum_info{tables}{$t}{pages}{removed}) { $main_page[0] = $t; $main_page[1] = $autovacuum_info{tables}{$t}{pages}{removed}; } } $vacuum_info .= "Total" . &comma_numbers($total_count) . "" . &comma_numbers($total_idxscan) . "" . &comma_numbers($total_tuple) . "" . &comma_numbers($total_page) . ""; my %infos_page = (); my @small = (); foreach my $d (sort keys %{$autovacuum_info{tables}}) { if ((($autovacuum_info{tables}{$d}{pages}{removed} * 100) / ($total_page || 1)) > $pie_percentage_limit) { $infos_page{$d} = $autovacuum_info{tables}{$d}{pages}{removed} || 0; } else { $infos_page{"Sum pages removed < $pie_percentage_limit%"} += $autovacuum_info{tables}{$d}{pages}{removed} || 0; push(@small, $d); } } if ($#small == 0) { $infos_page{$small[0]} = $infos_page{"Sum pages removed < $pie_percentage_limit%"}; delete $infos_page{"Sum pages removed < $pie_percentage_limit%"}; } $drawn_graphs{pagevacuums_graph} = &jqplot_piegraph($graphid++, 'graph_pagevacuums', 'Pages removed per tables', %infos_page); $vacuum_info = qq{$NODATA} if (!$total_count); $total_count = &comma_numbers($total_count); my $database = ''; if ($main_page[0] =~ s/^([^\.]+)\.//) { $database = $1; } print $fh qq{

Pages removed per table

Key values

  • $main_page[0] ($main_page[1]) Main table with removed pages on database $database
  • $total_page pages Total removed
$drawn_graphs{pagevacuums_graph}
$vacuum_info
Table Number of vacuums Index scans Tuples removed Pages removed
}; delete $drawn_graphs{pagevacuums_graph}; } sub print_lock_type { my %locktype = (); my $total_count = 0; my $total_duration = 0; my $locktype_info = ''; my @main_locktype = ('unknown',0); foreach my $t (sort keys %lock_info) { $locktype_info .= "$t" . &comma_numbers($lock_info{$t}{count}) . "" . &convert_time($lock_info{$t}{duration}) . "" . &convert_time($lock_info{$t}{duration} / ($lock_info{$t}{count} || 1)) . ""; $total_count += $lock_info{$t}{count}; $total_duration += $lock_info{$t}{duration}; if ($main_locktype[1] < $lock_info{$t}{count}) { $main_locktype[0] = $t; $main_locktype[1] = $lock_info{$t}{count}; } foreach my $o (sort keys %{$lock_info{$t}}) { next if (($o eq 'count') || ($o eq 'duration') || ($o eq 'chronos')); $locktype_info .= "$o" . &comma_numbers($lock_info{$t}{$o}{count}) . "" . &convert_time($lock_info{$t}{$o}{duration}) . "" . &convert_time($lock_info{$t}{$o}{duration} / $lock_info{$t}{$o}{count}) . "\n"; } } if ($total_count > 0) { $locktype_info .= "Total" . &comma_numbers($total_count) . "" . &convert_time($total_duration) . "" . &convert_time($total_duration / ($total_count || 1)) . ""; } else { $locktype_info = qq{$NODATA}; } if ($graph) { my @small = (); foreach my $d (sort keys %lock_info) { if ((($lock_info{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { $locktype{$d} = $lock_info{$d}{count} || 0; } else { $locktype{"Sum lock types < $pie_percentage_limit%"} += $lock_info{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $locktype{$small[0]} = $locktype{"Sum types < $pie_percentage_limit%"}; delete $locktype{"Sum lock types < $pie_percentage_limit%"}; } } $drawn_graphs{lockbytype_graph} = &jqplot_piegraph($graphid++, 'graph_lockbytype', 'Type of locks', %locktype); $total_count = &comma_numbers($total_count); print $fh qq{

Locks

Locks by type

Key values

  • $main_locktype[0] Main Lock Type
  • $total_count locks Total
$drawn_graphs{lockbytype_graph}
$locktype_info
Type Object Count Total Duration Average Duration (s)
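# Illustrative walk over a %lock_info-like structure (a small local copy, not the real
# global): each lock type key carries summary counters plus nested per-object entries,
# and the 'count'/'duration'/'chronos' meta keys are skipped when listing objects,
# exactly as in the table built above.
my %locks_sample = (
	'relation' => {
		'count'     => 3,
		'duration'  => 120,
		'chronos'   => {},
		'public.t1' => {'count' => 3, 'duration' => 120},
	},
);
foreach my $type (sort keys %locks_sample) {
	print "$type: $locks_sample{$type}{count} locks\n";
	foreach my $obj (sort keys %{$locks_sample{$type}}) {
		next if (($obj eq 'count') || ($obj eq 'duration') || ($obj eq 'chronos'));
		print "  $obj: $locks_sample{$type}{$obj}{count} locks\n";
	}
}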
}; delete $drawn_graphs{lockbytype_graph}; } sub print_query_type { my %data = (); my $total_queries = 0; my $total_select = 0; my $total_write = 0; foreach my $a (@SQL_ACTION) { $total_queries += $overall_stat{$a}; if ($a eq 'SELECT') { $total_select += $overall_stat{$a}; } elsif ($a ne 'OTHERS') { $total_write += $overall_stat{$a}; } } my $total = $overall_stat{'queries_number'}; my $querytype_info = ''; foreach my $a (@SQL_ACTION) { $querytype_info .= "$a" . &comma_numbers($overall_stat{$a}) . "" . sprintf("%0.2f", ($overall_stat{$a} * 100) / ($total||1)) . "%"; } if (($total - $total_queries) > 0) { $querytype_info .= "OTHERS" . &comma_numbers($total - $total_queries) . "" . sprintf("%0.2f", (($total - $total_queries) * 100) / ($total||1)) . "%"; } $querytype_info = qq{$NODATA} if (!$total); if ($graph && $total) { foreach my $t (@SQL_ACTION) { if ((($overall_stat{$t} * 100) / ($total||1)) > $pie_percentage_limit) { $data{$t} = $overall_stat{$t} || 0; } else { $data{"Sum query types < $pie_percentage_limit%"} += $overall_stat{$t} || 0; } } if (((($total - $total_queries) * 100) / ($total||1)) > $pie_percentage_limit) { $data{'Others'} = $total - $total_queries; } else { $data{"Sum query types < $pie_percentage_limit%"} += $total - $total_queries; } } $drawn_graphs{queriesbytype_graph} = &jqplot_piegraph($graphid++, 'graph_queriesbytype', 'Type of queries', %data); $total_select = &comma_numbers($total_select); $total_write = &comma_numbers($total_write); print $fh qq{

Queries

Queries by type

Key values

  • $total_select Total read queries
  • $total_write Total write queries
$drawn_graphs{queriesbytype_graph}
$querytype_info
Type Count Percentage
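# A one-line sketch of the percentage column above: each query type's count over the
# total number of queries, with the (... || 1) guard so an empty report does not
# divide by zero. The helper name is illustrative.
sub type_percentage
{
	my ($type_count, $total_queries) = @_;
	return sprintf("%0.2f", ($type_count * 100) / ($total_queries || 1));
}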
}; delete $drawn_graphs{queriesbytype_graph}; } sub print_query_per_database { my %infos = (); my $total_count = 0; my $query_database_info = ''; my @main_database = ('unknown', 0); my @main_database_duration = ('unknown', 0); foreach my $d (sort keys %database_info) { $query_database_info .= "$dTotal" . &comma_numbers($database_info{$d}{count}) . "" . &convert_time($database_info{$d}{duration}) . ""; $total_count += $database_info{$d}{count}; if ($main_database[1] < $database_info{$d}{count}) { $main_database[0] = $d; $main_database[1] = $database_info{$d}{count}; } if ($main_database_duration[1] < $database_info{$d}{duration}) { $main_database_duration[0] = $d; $main_database_duration[1] = $database_info{$d}{duration}; } foreach my $r (sort keys %{$database_info{$d}}) { next if (($r eq 'count') || ($r =~ /duration/)); $query_database_info .= "$r" . &comma_numbers($database_info{$d}{$r}) . "" . &convert_time($database_info{$d}{"$r|duration"}) . ""; } } $query_database_info = qq{$NODATA} if (!$total_count); if ($graph) { my @small = (); foreach my $d (sort keys %database_info) { if ((($database_info{$d}{count} * 100) / ($total_count || 1)) > $pie_percentage_limit) { $infos{$d} = $database_info{$d}{count} || 0; } else { $infos{"Sum queries per databases < $pie_percentage_limit%"} += $database_info{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum queries per databases < $pie_percentage_limit%"}; delete $infos{"Sum queries per databases < $pie_percentage_limit%"}; } } $drawn_graphs{queriesbydatabase_graph} = &jqplot_piegraph($graphid++, 'graph_queriesbydatabase', 'Queries per database', %infos); $main_database[1] = &comma_numbers($main_database[1]); $main_database_duration[1] = &convert_time($main_database_duration[1]); print $fh qq{

Queries by database

Key values

  • $main_database[0] Main database
  • $main_database[1] Requests
  • $main_database_duration[1] ($main_database_duration[0]) Main time consuming database
$drawn_graphs{queriesbydatabase_graph}
$query_database_info
Database Request type Count Duration
}; delete $drawn_graphs{queriesbydatabase_graph}; } sub print_query_per_application { my %infos = (); my $total_count = 0; my $query_application_info = ''; my @main_application = ('unknown', 0); my @main_application_duration = ('unknown', 0); foreach my $d (sort keys %application_info) { $query_application_info .= "$dTotal" . &comma_numbers($application_info{$d}{count}) . "" . &convert_time($application_info{$d}{duration}) . ""; $total_count += $application_info{$d}{count}; if ($main_application[1] < $application_info{$d}{count}) { $main_application[0] = $d; $main_application[1] = $application_info{$d}{count}; } if ($main_application_duration[1] < $application_info{$d}{duration}) { $main_application_duration[0] = $d; $main_application_duration[1] = $application_info{$d}{duration}; } foreach my $r (sort keys %{$application_info{$d}}) { next if (($r eq 'count') || ($r =~ /duration/)); $query_application_info .= "$r" . &comma_numbers($application_info{$d}{$r}) . "" . &convert_time($application_info{$d}{"$r|duration"}) . ""; } } $query_application_info = qq{$NODATA} if (!$total_count); if ($graph) { my @small = (); foreach my $d (sort keys %application_info) { if ((($application_info{$d}{count} * 100) / ($total_count || 1)) > $pie_percentage_limit) { $infos{$d} = $application_info{$d}{count} || 0; } else { $infos{"Sum queries per applications < $pie_percentage_limit%"} += $application_info{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum queries per applications < $pie_percentage_limit%"}; delete $infos{"Sum queries per applications < $pie_percentage_limit%"}; } } $drawn_graphs{queriesbyapplication_graph} = &jqplot_piegraph($graphid++, 'graph_queriesbyapplication', 'Queries per application', %infos); $main_application[1] = &comma_numbers($main_application[1]); $main_application_duration[1] = &convert_time($main_application_duration[1]); print $fh qq{

Queries by application

Key values

  • $main_application[0] Main application
  • $main_application[1] Requests
  • $main_application_duration[1] ($main_application_duration[0]) Main time consuming application
$drawn_graphs{queriesbyapplication_graph}
$query_application_info
Application Request type Count Duration
}; delete $drawn_graphs{queriesbyapplication_graph}; } sub print_query_per_user { my %infos = (); my $total_count = 0; my $total_duration = 0; my $query_user_info = ''; my @main_user = ('unknown', 0); my @main_user_duration = ('unknown', 0); foreach my $d (sort keys %user_info) { $query_user_info .= "$dTotal" . &comma_numbers($user_info{$d}{count}) . "" . &convert_time($user_info{$d}{duration}) . ""; $total_count += $user_info{$d}{count}; $total_duration += $user_info{$d}{duration}; if ($main_user[1] < $user_info{$d}{count}) { $main_user[0] = $d; $main_user[1] = $user_info{$d}{count}; } if ($main_user_duration[1] < $user_info{$d}{duration}) { $main_user_duration[0] = $d; $main_user_duration[1] = $user_info{$d}{duration}; } foreach my $r (sort keys %{$user_info{$d}}) { next if (($r eq 'count') || ($r =~ /duration/)); $query_user_info .= "$r" . &comma_numbers($user_info{$d}{$r}) . "" . &convert_time($user_info{$d}{"$r|duration"}) . ""; } } $query_user_info = qq{$NODATA} if (!$total_count); if ($graph) { my %small = (); foreach my $d (sort keys %user_info) { if ((($user_info{$d}{count} * 100) / ($total_count || 1)) > $pie_percentage_limit) { $infos{queries}{$d} = $user_info{$d}{count} || 0; } else { $infos{queries}{"Sum queries per users < $pie_percentage_limit%"} += $user_info{$d}{count} || 0; push(@{$small{queries}}, $d); } if ((($user_info{$d}{duration} * 100) / ($total_duration || 1)) > $pie_percentage_limit) { $infos{duration}{$d} = $user_info{$d}{duration} || 0; } else { $infos{duration}{"Sum duration per users < $pie_percentage_limit%"} += $user_info{$d}{duration} || 0; push(@{$small{duration}}, $d); } } if ($#{$small{queries}} == 0) { $infos{queries}{$small{queries}[0]} = $infos{queries}{"Sum queries per users < $pie_percentage_limit%"}; delete $infos{queries}{"Sum queries per users < $pie_percentage_limit%"}; } if ($#{$small{duration}} == 0){ $infos{duration}{$small{duration}[0]} = $infos{duration}{"Sum duration per users < $pie_percentage_limit%"}; delete $infos{duration}{"Sum duration per users < $pie_percentage_limit%"}; } } $drawn_graphs{queriesbyuser_graph} = &jqplot_piegraph($graphid++, 'graph_queriesbyuser', 'Queries per user', %{$infos{queries}}); $drawn_graphs{durationbyuser_graph} = &jqplot_piegraph($graphid++, 'graph_durationbyuser', 'Duration per user', %{$infos{duration}}); $main_user[1] = &comma_numbers($main_user[1]); $main_user_duration[1] = &convert_time($main_user_duration[1]); print $fh qq{

Queries by user

Key values

  • $main_user[0] Main user
  • $main_user[1] Requests
$drawn_graphs{queriesbyuser_graph}
$query_user_info
User Request type Count Duration
}; delete $drawn_graphs{queriesbyuser_graph}; print $fh qq{

Duration by user

Key values

  • $main_user_duration[1] ($main_user_duration[0]) Main time consuming user
$drawn_graphs{durationbyuser_graph}
$query_user_info
User Request type Count Duration
}; delete $drawn_graphs{durationbyuser_graph}; } sub print_query_per_host { my %infos = (); my $total_count = 0; my $query_host_info = ''; my @main_host = ('unknown', 0); my @main_host_duration = ('unknown', 0); foreach my $d (sort keys %host_info) { $query_host_info .= "$dTotal" . &comma_numbers($host_info{$d}{count}) . "" . &convert_time($host_info{$d}{duration}) . ""; $total_count += $host_info{$d}{count}; if ($main_host[1] < $host_info{$d}{count}) { $main_host[0] = $d; $main_host[1] = $host_info{$d}{count}; } if ($main_host_duration[1] < $host_info{$d}{duration}) { $main_host_duration[0] = $d; $main_host_duration[1] = $host_info{$d}{duration}; } foreach my $r (sort keys %{$host_info{$d}}) { next if (($r eq 'count') || ($r =~ /duration/)); $query_host_info .= "$r" . &comma_numbers($host_info{$d}{$r}) . "" . &convert_time($host_info{$d}{"$r|duration"}) . ""; } } $query_host_info = qq{$NODATA} if (!$total_count); if ($graph) { my @small = (); foreach my $d (sort keys %host_info) { if ((($host_info{$d}{count} * 100) / ($total_count || 1)) > $pie_percentage_limit) { $infos{$d} = $host_info{$d}{count} || 0; } else { $infos{"Sum queries per hosts < $pie_percentage_limit%"} += $host_info{$d}{count} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum queries per hosts < $pie_percentage_limit%"}; delete $infos{"Sum queries per hosts < $pie_percentage_limit%"}; } } $drawn_graphs{queriesbyhost_graph} = &jqplot_piegraph($graphid++, 'graph_queriesbyhost', 'Queries per host', %infos); $main_host[1] = &comma_numbers($main_host[1]); $main_host_duration[1] = &convert_time($main_host_duration[1]); print $fh qq{

Queries by host

Key values

  • $main_host[0] Main host
  • $main_host[1] Requests
  • $main_host_duration[1] ($main_host_duration[0]) Main time consuming host
$drawn_graphs{queriesbyhost_graph}
$query_host_info
Host Request type Count Duration
}; delete $drawn_graphs{queriesbyhost_graph}; } sub print_lock_queries_report { my @top_locked_queries; foreach my $h (keys %normalyzed_info) { if (exists($normalyzed_info{$h}{locks})) { push (@top_locked_queries, [$h, $normalyzed_info{$h}{locks}{count}, $normalyzed_info{$h}{locks}{wait}, $normalyzed_info{$h}{locks}{minwait}, $normalyzed_info{$h}{locks}{maxwait}]); } } # Most frequent waiting queries (N) @top_locked_queries = sort {$b->[2] <=> $a->[2]} @top_locked_queries; print $fh qq{

Most frequent waiting queries (N)

}; my $rank = 1; for (my $i = 0 ; $i <= $#top_locked_queries ; $i++) { my $count = &comma_numbers($top_locked_queries[$i]->[1]); my $total_time = &convert_time($top_locked_queries[$i]->[2]); my $min_time = &convert_time($top_locked_queries[$i]->[3]); my $max_time = &convert_time($top_locked_queries[$i]->[4]); my $avg_time = &convert_time($top_locked_queries[$i]->[2] / ($top_locked_queries[$i]->[1] || 1)); my $query = &highlight_code($top_locked_queries[$i]->[0]); my $md5 = ''; $md5 = 'md5: ' . md5_hex($top_locked_queries[$i]->[0]) if ($enable_checksum); my $example = qq{

}; my $k = $top_locked_queries[$i]->[0]; $example = '' if (scalar keys %{$normalyzed_info{$k}{samples}} == 0); print $fh qq{ }; } $rank++; } if ($#top_locked_queries == -1) { print $fh qq{}; } print $fh qq{
Rank Count Total time Min time Max time Avg duration Query
$rank $count $total_time $min_time $max_time $avg_time
$query
$md5 $example
}; if (scalar keys %{$normalyzed_info{$k}{samples}}) { my $idx = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { last if ($idx > $sample); $query = &highlight_code($normalyzed_info{$k}{samples}{$d}{query}); $md5 = ''; $md5 = 'md5: ' . md5_hex($normalyzed_info{$k}{samples}{$d}{query}) if ($enable_checksum); my $details = "Date: $normalyzed_info{$k}{samples}{$d}{date}\n"; $details .= "Duration: " . &convert_time($d) . "\n" if ($normalyzed_info{$k}{samples}{$d}{duration}); $details .= "Database: $normalyzed_info{$k}{samples}{$d}{db}\n" if ($normalyzed_info{$k}{samples}{$d}{db}); $details .= "User: $normalyzed_info{$k}{samples}{$d}{user}\n" if ($normalyzed_info{$k}{samples}{$d}{user}); $details .= "Remote: $normalyzed_info{$k}{samples}{$d}{remote}\n" if ($normalyzed_info{$k}{samples}{$d}{remote}); $details .= "Application: $normalyzed_info{$k}{samples}{$d}{app}\n" if ($normalyzed_info{$k}{samples}{$d}{app}); $details .= "Bind query: yes\n" if ($normalyzed_info{$k}{samples}{$d}{bind}); my $explain = ''; if ($normalyzed_info{$k}{samples}{$d}{plan}) { my $url = $EXPLAIN_URL . url_escape($normalyzed_info{$k}{samples}{$d}{plan}); $explain = "\n
" . $normalyzed_info{$k}{samples}{$d}{plan} . "
\n"; } print $fh qq{
$query
$md5
$details
$explain }; $idx++; } print $fh qq{

$NODATA
}; @top_locked_queries = (); # Queries that waited the most @top_locked_info = sort {$b->[1] <=> $a->[1]} @top_locked_info; print $fh qq{

Queries that waited the most

}; $rank = 1; for (my $i = 0 ; $i <= $#top_locked_info ; $i++) { my $query = &highlight_code($top_locked_info[$i]->[2]); my $md5 = ''; $md5 = 'md5: ' . md5_hex($top_locked_info[$i]->[2]) if ($enable_checksum); my $details = "[ Date: " . ($top_locked_info[$i]->[1] || ''); $details .= " - Database: $top_locked_info[$i]->[3]" if ($top_locked_info[$i]->[3]); $details .= " - User: $top_locked_info[$i]->[4]" if ($top_locked_info[$i]->[4]); $details .= " - Remote: $top_locked_info[$i]->[5]" if ($top_locked_info[$i]->[5]); $details .= " - Application: $top_locked_info[$i]->[6]" if ($top_locked_info[$i]->[6]); $details .= " - Bind query: yes" if ($top_locked_info[$i]->[7]); $details .= " ]"; my $time = &convert_time($top_locked_info[$i]->[0]); print $fh qq{ }; $rank++; } if ($#top_locked_info == -1) { print $fh qq{}; } print $fh qq{
Rank Wait time Query
$rank $time
$query
$md5
$details
$NODATA
}; } sub print_tempfile_report { my @top_temporary = (); foreach my $h (keys %normalyzed_info) { if (exists($normalyzed_info{$h}{tempfiles})) { push (@top_temporary, [$h, $normalyzed_info{$h}{tempfiles}{count}, $normalyzed_info{$h}{tempfiles}{size}, $normalyzed_info{$h}{tempfiles}{minsize}, $normalyzed_info{$h}{tempfiles}{maxsize}]); } } # Queries generating the most temporary files (N) if ($#top_temporary >= 0) { @top_temporary = sort {$b->[1] <=> $a->[1]} @top_temporary; print $fh qq{

Queries generating the most temporary files (N)

}; my $rank = 1; for (my $i = 0 ; $i <= $#top_temporary ; $i++) { my $count = &comma_numbers($top_temporary[$i]->[1]); my $total_size = &pretty_print_size($top_temporary[$i]->[2]); my $min_size = &pretty_print_size($top_temporary[$i]->[3]); my $max_size = &pretty_print_size($top_temporary[$i]->[4]); my $avg_size = &pretty_print_size($top_temporary[$i]->[2] / ($top_temporary[$i]->[1] || 1)); my $query = &highlight_code($top_temporary[$i]->[0]); my $md5 = ''; $md5 = 'md5: ' . md5_hex($top_temporary[$i]->[0]) if ($enable_checksum); my $example = qq{

}; my $k = $top_temporary[$i]->[0]; $example = '' if (scalar keys %{$normalyzed_info{$k}{samples}} == 0); print $fh qq{ }; } $rank++; } print $fh qq{
Rank Count Total size Min size Max size Avg size Query
$rank $count $total_size $min_size $max_size $avg_size
$query
$md5 $example
}; if (scalar keys %{$normalyzed_info{$k}{samples}}) { my $idx = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { last if ($idx > $sample); $query = &highlight_code($normalyzed_info{$k}{samples}{$d}{query}); my $md5 = ''; $md5 = 'md5: ' . md5_hex($normalyzed_info{$k}{samples}{$d}{query}) if ($enable_checksum); my $details = "Date: " . $normalyzed_info{$k}{samples}{$d}{date} . "\n"; $details .= "Duration: " . &convert_time($d) . "\n"; $details .= "Database: $normalyzed_info{$k}{samples}{$d}{db}\n" if ($normalyzed_info{$k}{samples}{$d}{db}); $details .= "User: $normalyzed_info{$k}{samples}{$d}{user}\n" if ($normalyzed_info{$k}{samples}{$d}{user}); $details .= "Remote: $normalyzed_info{$k}{samples}{$d}{remote}\n" if ($normalyzed_info{$k}{samples}{$d}{remote}); $details .= "Application: $normalyzed_info{$k}{samples}{$d}{app}\n" if ($normalyzed_info{$k}{samples}{$d}{app}); $details .= "Bind query: yes\n" if ($normalyzed_info{$k}{samples}{$d}{bind}); print $fh qq{
$query
$md5
$details
}; $idx++ } print $fh qq{

}; @top_temporary = (); } # Top queries generating the largest temporary files if ($#top_tempfile_info >= 0) { @top_tempfile_info = sort {$b->[0] <=> $a->[0]} @top_tempfile_info; my $largest = &comma_numbers($top_temporary[0]->[0]); print $fh qq{

Queries generating the largest temporary files

}; my $rank = 1; for (my $i = 0 ; $i <= $#top_tempfile_info ; $i++) { my $size = &pretty_print_size($top_tempfile_info[$i]->[0]); my $details = "[ Date: $top_tempfile_info[$i]->[1]"; $details .= " - Database: $top_tempfile_info[$i]->[3]" if ($top_tempfile_info[$i]->[3]); $details .= " - User: $top_tempfile_info[$i]->[4]" if ($top_tempfile_info[$i]->[4]); $details .= " - Remote: $top_tempfile_info[$i]->[5]" if ($top_tempfile_info[$i]->[5]); $details .= " - Application: $top_tempfile_info[$i]->[6]" if ($top_tempfile_info[$i]->[6]); $details .= " - Bind yes: yes" if ($top_tempfile_info[$i]->[7]); $details .= " ]"; my $query = &highlight_code($top_tempfile_info[$i]->[2]); my $md5 = ''; $md5 = 'md5: ' . md5_hex($top_tempfile_info[$i]->[2]) if ($enable_checksum); print $fh qq{ }; $rank++; } print $fh qq{
Rank Size Query
$rank $size
$query
$md5
$details
}; @top_tempfile_info = (); } } sub print_cancelled_report { my @top_cancelled = (); foreach my $h (keys %normalyzed_info) { if (exists($normalyzed_info{$h}{cancelled})) { push (@top_cancelled, [$h, $normalyzed_info{$h}{cancelled}{count}]); } } # Queries generating the most cancellation (N) if ($#top_cancelled >= 0) { @top_cancelled = sort {$b->[1] <=> $a->[1]} @top_cancelled; print $fh qq{

Queries generating the most cancellation (N)

}; my $rank = 1; for (my $i = 0 ; $i <= $#top_cancelled ; $i++) { my $count = &comma_numbers($top_cancelled[$i]->[1]); my $query = &highlight_code($top_cancelled[$i]->[0]); my $md5 = ''; $md5 = 'md5: ' . md5_hex($top_cancelled[$i]->[0]) if ($enable_checksum); my $example = qq{

}; my $k = $top_cancelled[$i]->[0]; $example = '' if (scalar keys %{$normalyzed_info{$k}{samples}} == 0); print $fh qq{ }; } $rank++; } print $fh qq{
Rank Count Query
$rank $count
$query
$md5 $example
}; if (scalar keys %{$normalyzed_info{$k}{samples}}) { my $idx = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { last if ($idx > $sample); $query = &highlight_code($normalyzed_info{$k}{samples}{$d}{query}); my $md5 = ''; $md5 = 'md5: ' . md5_hex($normalyzed_info{$k}{samples}{$d}{query}) if ($enable_checksum); my $details = "Duration: " . &convert_time($d) . "
"; $details .= "Database: $normalyzed_info{$k}{samples}{$d}{db}
" if ($normalyzed_info{$k}{samples}{$d}{db}); $details .= "User: $normalyzed_info{$k}{samples}{$d}{user}
" if ($normalyzed_info{$k}{samples}{$d}{user}); $details .= "Remote: $normalyzed_info{$k}{samples}{$d}{remote}
" if ($normalyzed_info{$k}{samples}{$d}{remote}); $details .= "Application: $normalyzed_info{$k}{samples}{$d}{app}
" if ($normalyzed_info{$k}{samples}{$d}{app}); $details .= "Bind query: yes
" if ($normalyzed_info{$k}{samples}{$d}{bind}); print $fh qq{
$query
$md5
$details
}; $idx++ } print $fh qq{

}; @top_cancelled = (); } # Top queries generating the most cancellation if ($#top_cancelled_info >= 0) { @top_cancelled_info = sort {$b->[0] <=> $a->[0]} @top_cancelled_info; my $largest = &comma_numbers($top_cancelled[0]->[0]); print $fh qq{

Queries most cancelled

}; my $rank = 1; for (my $i = 0 ; $i <= $#top_cancelled_info ; $i++) { my $count = &comma_numbers($top_cancelled_info[$i]->[0]); my $details = "[ Date: $top_cancelled_info[$i]->[1]"; $details .= " - Database: $top_cancelled_info[$i]->[3]" if ($top_cancelled_info[$i]->[3]); $details .= " - User: $top_cancelled_info[$i]->[4]" if ($top_cancelled_info[$i]->[4]); $details .= " - Remote: $top_cancelled_info[$i]->[5]" if ($top_cancelled_info[$i]->[5]); $details .= " - Application: $top_cancelled_info[$i]->[6]" if ($top_cancelled_info[$i]->[6]); $details .= " - Bind yes: yes" if ($top_cancelled_info[$i]->[7]); $details .= " ]"; my $query = &highlight_code($top_cancelled_info[$i]->[2]); my $md5 = ''; $md5 = 'md5: ' . md5_hex($top_cancelled_info[$i]->[2]) if ($enable_checksum); print $fh qq{ }; $rank++; } print $fh qq{
Rank Number Query
$rank $count
$query
$md5
$details
}; @top_cancelled_info = (); } } sub print_histogram_query_times { my %data = (); my $histogram_info = ''; my $most_range = ''; my $most_range_value = ''; for (my $i = 1; $i <= $#histogram_query_time; $i++) { $histogram_info .= "$histogram_query_time[$i-1]-$histogram_query_time[$i]ms" . &comma_numbers($overall_stat{histogram}{query_time}{$histogram_query_time[$i-1]}) . "" . sprintf("%0.2f", ($overall_stat{histogram}{query_time}{$histogram_query_time[$i-1]} * 100) / ($overall_stat{histogram}{query_total}||1)) . "%"; $data{"$histogram_query_time[$i-1]-$histogram_query_time[$i]ms"} = ($overall_stat{histogram}{query_time}{$histogram_query_time[$i-1]} || 0); if ($overall_stat{histogram}{query_time}{$histogram_query_time[$i-1]} > $most_range_value) { $most_range = "$histogram_query_time[$i-1]-$histogram_query_time[$i]ms"; $most_range_value = $overall_stat{histogram}{query_time}{$histogram_query_time[$i-1]}; } } if ($overall_stat{histogram}{query_total} > 0) { $data{"> $histogram_query_time[-1]ms"} = ($overall_stat{histogram}{query_time}{"-1"} || 0); $histogram_info .= " > $histogram_query_time[-1]ms" . &comma_numbers($overall_stat{histogram}{query_time}{'-1'}) . "" . sprintf("%0.2f", ($overall_stat{histogram}{query_time}{'-1'} * 100) / ($overall_stat{histogram}{query_total}||1)) . "%"; $data{"> $histogram_query_time[-1]ms"} = $overall_stat{histogram}{query_time}{"-1"} if ($overall_stat{histogram}{query_time}{"-1"} > 0); if ($overall_stat{histogram}{query_time}{"-1"} > $most_range_value) { $most_range = "> $histogram_query_time[-1]ms"; $most_range_value = $overall_stat{histogram}{query_time}{"-1"}; } } else { $histogram_info = qq{$NODATA}; } $drawn_graphs{histogram_query_times_graph} = &jqplot_duration_histograph($graphid++, 'graph_histogram_query_times', 'Queries', \@histogram_query_time, %data); $most_range_value = &comma_numbers($most_range_value) if ($most_range_value); print $fh qq{

Top Queries

Histogram of query times

Key values

  • $most_range_value $most_range duration
$drawn_graphs{histogram_query_times_graph}
$histogram_info
Range Count Percentage
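# Hypothetical bucketing consistent with the rows above: each histogram row is keyed
# by the lower bound of its range, and anything above the last boundary is keyed '-1'
# (rendered as the "> last range" row). This is an assumed reconstruction for
# illustration, not pgBadger's own counting code.
sub histogram_bucket
{
	my ($duration_ms, @bounds) = @_;    # @bounds sorted ascending, e.g. (0, 1, 5, 10, 25, 50, 100, 500, 1000, 10000)

	for (my $i = 1; $i <= $#bounds; $i++) {
		return $bounds[$i - 1] if ($duration_ms < $bounds[$i]);
	}

	return -1;    # above the last boundary
}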
}; delete $drawn_graphs{histogram_query_times_graph}; } sub print_slowest_individual_queries { print $fh qq{

Slowest individual queries

}; my $idx = 1; for (my $i = 0 ; $i <= $#top_slowest ; $i++) { my $rank = $i + 1; my $duration = &convert_time($top_slowest[$i]->[0]); my $date = $top_slowest[$i]->[1] || ''; my $details = "[ Date: " . ($top_slowest[$i]->[1] || ''); $details .= " - Database: $top_slowest[$i]->[3]" if ($top_slowest[$i]->[3]); $details .= " - User: $top_slowest[$i]->[4]" if ($top_slowest[$i]->[4]); $details .= " - Remote: $top_slowest[$i]->[5]" if ($top_slowest[$i]->[5]); $details .= " - Application: $top_slowest[$i]->[6]" if ($top_slowest[$i]->[6]); $details .= " - Bind query: yes" if ($top_slowest[$i]->[7]); $details .= " ]"; my $explain = ''; if ($top_slowest[$i]->[8]) { my $url = $EXPLAIN_URL . url_escape($top_slowest[$i]->[8]); $explain = "\n
" . $top_slowest[$i]->[8] . "
\n"; } my $query = &highlight_code($top_slowest[$i]->[2]); my $md5 = ''; $md5 = 'md5: ' . md5_hex($top_slowest[$i]->[2]) if ($enable_checksum); print $fh qq{ }; $idx++; } if ($#top_slowest == -1) { print $fh qq{}; } print $fh qq{
Rank Duration Query
$rank $duration
$query
$md5
$details
$explain
$NODATA
}; } sub print_time_consuming { print $fh qq{

Time consuming queries

}; my $rank = 1; my $found = 0; foreach my $k (sort {$normalyzed_info{$b}{duration} <=> $normalyzed_info{$a}{duration}} keys %normalyzed_info) { next if (!$normalyzed_info{$k}{count} || !exists $normalyzed_info{$k}{duration}); last if ($rank > $top); $found++; $normalyzed_info{$k}{average} = $normalyzed_info{$k}{duration} / $normalyzed_info{$k}{count}; my $duration = &convert_time($normalyzed_info{$k}{duration}); my $count = &comma_numbers($normalyzed_info{$k}{count}); my $min = &convert_time($normalyzed_info{$k}{min}); my $max = &convert_time($normalyzed_info{$k}{max}); my $avg = &convert_time($normalyzed_info{$k}{average}); my $query = &highlight_code($k); my $md5 = ''; $md5 = 'md5: ' . md5_hex($k) if ($enable_checksum); my $details = ''; my %hourly_count = (); my %hourly_duration = (); my $days = 0; foreach my $d (sort keys %{$normalyzed_info{$k}{chronos}}) { $d =~ /^(\d{4})(\d{2})(\d{2})$/; $days++; my $zday = "$abbr_month{$2} $3"; foreach my $h (sort keys %{$normalyzed_info{$k}{chronos}{$d}}) { $normalyzed_info{$k}{chronos}{$d}{$h}{average} = $normalyzed_info{$k}{chronos}{$d}{$h}{duration} / ($normalyzed_info{$k}{chronos}{$d}{$h}{count} || 1); $details .= ""; $zday = ""; foreach my $m (sort keys %{$normalyzed_info{$k}{chronos}{$d}{$h}{min}}) { my $rd = &average_per_minutes($m, $histo_avg_minutes); $hourly_count{"$h:$rd"} += $normalyzed_info{$k}{chronos}{$d}{$h}{min}{$m}; $hourly_duration{"$h:$rd"} += ($normalyzed_info{$k}{chronos}{$d}{$h}{min_duration}{$m} || 0); } if ($#histo_avgs > 0) { foreach my $rd (@histo_avgs) { next if (!exists $hourly_count{"$h:$rd"}); $details .= ""; } } } } # Set graph dataset my %graph_data = (); foreach my $h ("00" .. "23") { foreach my $rd (@histo_avgs) { $graph_data{count} .= "['$h:$rd'," . ($hourly_count{"$h:$rd"} || 0) . "],"; $graph_data{duration} .= "['$h:$rd'," . (int($hourly_duration{"$h:$rd"} / ($hourly_count{"$h:$rd"} || 1)) || 0) . "],"; } } $graph_data{count} =~ s/,$//; $graph_data{duration} =~ s/,$//; %hourly_count = (); %hourly_duration = (); my $users_involved = ''; if (scalar keys %{$normalyzed_info{$k}{users}} > 0) { $users_involved = qq{}; } my $apps_involved = ''; if (scalar keys %{$normalyzed_info{$k}{apps}} > 0) { $apps_involved = qq{}; } my $query_histo = &jqplot_histograph($graphid++, 'time_consuming_queries_details_'.$rank, $graph_data{count}, $graph_data{duration}, 'Queries', 'Avg. duration'); print $fh qq{ }; $rank++; } if (!$found) { print $fh qq{}; } print $fh qq{
Rank Total duration Times executed Min duration Max duration Avg duration Query
$zday$h" . &comma_numbers($normalyzed_info{$k}{chronos}{$d}{$h}{count}) . "" . &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{duration}) . "" . &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{average}) . "
$zday$h:$rd" . &comma_numbers($hourly_count{"$h:$rd"}) . "" . &convert_time($hourly_duration{"$h:$rd"}) . "" . &convert_time($hourly_duration{"$h:$rd"}/($hourly_count{"$h:$rd"}||1)) . "
$rank $duration $count

Details

$min $max $avg
$query
$md5

Times Reported Time consuming queries #$rank

$query_histo $details
Day Hour Count Duration Avg duration

$users_involved $apps_involved

}; if (scalar keys %{$normalyzed_info{$k}{users}} > 0) { print $fh qq{
}; my $idx = 1; foreach my $u (sort {$normalyzed_info{$k}{users}{$b}{duration} <=> $normalyzed_info{$k}{users}{$a}{duration}} keys %{$normalyzed_info{$k}{users}}) { if ($normalyzed_info{$k}{users}{$u}{duration} > 0) { my $details = "[ User: $u"; $details .= " - Total duration: ".&convert_time($normalyzed_info{$k}{users}{$u}{duration}); $details .= " - Times executed: $normalyzed_info{$k}{users}{$u}{count}"; $details .= " ]\n"; print $fh qq{
$details
}; $idx++; } } print $fh qq{

}; } if (scalar keys %{$normalyzed_info{$k}{apps}} > 0) { print $fh qq{
}; my $idx = 1; foreach my $u (sort {$normalyzed_info{$k}{apps}{$b}{duration} <=> $normalyzed_info{$k}{apps}{$a}{duration}} keys %{$normalyzed_info{$k}{apps}}) { if ($normalyzed_info{$k}{apps}{$u}{duration} > 0) { my $details = "[ Application: $u"; $details .= " - Total duration: ".&convert_time($normalyzed_info{$k}{apps}{$u}{duration}); $details .= " - Times executed: $normalyzed_info{$k}{apps}{$u}{count}"; $details .= " ]\n"; print $fh qq{
$details
}; $idx++; } } print $fh qq{

}; } print $fh qq{
}; my $idx = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { last if ($idx > $sample); my $details = "Date: $normalyzed_info{$k}{samples}{$d}{date}\n"; $details .= "Duration: " . &convert_time($d) . "\n"; $details .= "Database: $normalyzed_info{$k}{samples}{$d}{db}\n" if ($normalyzed_info{$k}{samples}{$d}{db}); $details .= "User: $normalyzed_info{$k}{samples}{$d}{user}\n" if ($normalyzed_info{$k}{samples}{$d}{user}); $details .= "Remote: $normalyzed_info{$k}{samples}{$d}{remote}\n" if ($normalyzed_info{$k}{samples}{$d}{remote}); $details .= "Application: $normalyzed_info{$k}{samples}{$d}{app}\n" if ($normalyzed_info{$k}{samples}{$d}{app}); $details .= "Bind query: yes\n" if ($normalyzed_info{$k}{samples}{$d}{bind}); my $explain = ''; if ($normalyzed_info{$k}{samples}{$d}{plan}) { my $url = $EXPLAIN_URL . url_escape($normalyzed_info{$k}{samples}{$d}{plan}); $explain = "\n
" . $normalyzed_info{$k}{samples}{$d}{plan} . "
\n"; } $query = &highlight_code($normalyzed_info{$k}{samples}{$d}{query}); my $md5 = ''; $md5 = 'md5: ' . md5_hex($normalyzed_info{$k}{samples}{$d}{query}) if ($enable_checksum); print $fh qq{
$query
$md5
$details
$explain }; $idx++; } print $fh qq{

$NODATA
}; } sub print_most_frequent { print $fh qq{

Most frequent queries (N)

}; my $rank = 1; foreach my $k (sort {$normalyzed_info{$b}{count} <=> $normalyzed_info{$a}{count}} keys %normalyzed_info) { next if (!$normalyzed_info{$k}{count}); last if ($rank > $top); $normalyzed_info{$k}{average} = $normalyzed_info{$k}{duration} / $normalyzed_info{$k}{count}; my $duration = &convert_time($normalyzed_info{$k}{duration}); my $count = &comma_numbers($normalyzed_info{$k}{count}); my $min = &convert_time($normalyzed_info{$k}{min}); my $max = &convert_time($normalyzed_info{$k}{max}); my $avg = &convert_time($normalyzed_info{$k}{average}); my $query = &highlight_code($k); my $md5 = ''; $md5 = 'md5: ' . md5_hex($k) if ($enable_checksum); my %hourly_count = (); my %hourly_duration = (); my $days = 0; my $details = ''; foreach my $d (sort keys %{$normalyzed_info{$k}{chronos}}) { $d =~ /^\d{4}(\d{2})(\d{2})$/; $days++; my $zday = "$abbr_month{$1} $2"; foreach my $h (sort keys %{$normalyzed_info{$k}{chronos}{$d}}) { $normalyzed_info{$k}{chronos}{$d}{$h}{average} = $normalyzed_info{$k}{chronos}{$d}{$h}{duration} / $normalyzed_info{$k}{chronos}{$d}{$h}{count}; $details .= ""; $zday = ""; foreach my $m (sort keys %{$normalyzed_info{$k}{chronos}{$d}{$h}{min}}) { my $rd = &average_per_minutes($m, $histo_avg_minutes); $hourly_count{"$h:$rd"} += $normalyzed_info{$k}{chronos}{$d}{$h}{min}{$m}; $hourly_duration{"$h:$rd"} += ($normalyzed_info{$k}{chronos}{$d}{$h}{min_duration}{$m} || 0); } if ($#histo_avgs > 0) { foreach my $rd (@histo_avgs) { next if (!exists $hourly_count{"$h:$rd"}); $details .= ""; } } } } # Set graph dataset my %graph_data = (); foreach my $h ("00" .. "23") { foreach my $rd (@histo_avgs) { $graph_data{count} .= "['$h:$rd'," . ($hourly_count{"$h:$rd"} || 0) . "],"; $graph_data{duration} .= "['$h:$rd'," . (int($hourly_duration{"$h:$rd"} / ($hourly_count{"$h:$rd"} || 1)) || 0) . "],"; } } $graph_data{count} =~ s/,$//; $graph_data{duration} =~ s/,$//; %hourly_count = (); %hourly_duration = (); my $query_histo = &jqplot_histograph($graphid++, 'most_frequent_queries_details_'.$rank, $graph_data{count}, $graph_data{duration}, 'Queries', 'Avg. duration'); my $users_involved = ''; if (scalar keys %{$normalyzed_info{$k}{users}} > 0) { $users_involved = qq{}; } my $apps_involved = ''; if (scalar keys %{$normalyzed_info{$k}{apps}} > 0) { $apps_involved = qq{}; } print $fh qq{ }; $rank++; } if (scalar keys %normalyzed_info == 0) { print $fh qq{}; } print $fh qq{
Rank Times executed Total duration Min duration Max duration Avg duration Query
$zday$h" . &comma_numbers($normalyzed_info{$k}{chronos}{$d}{$h}{count}) . "" . &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{duration}) . "" . &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{average}) . "
$zday$h:$rd" . &comma_numbers($hourly_count{"$h:$rd"}) . "" . &convert_time($hourly_duration{"$h:$rd"}) . "" . &convert_time($hourly_duration{"$h:$rd"}/($hourly_count{"$h:$rd"}||1)) . "
$rank $count

Details

$duration $min $max $avg
$query
$md5

  Times Reported Most frequent queries #$rank

$query_histo $details
Day Hour Count Duration Avg duration

$users_involved $apps_involved

}; if (scalar keys %{$normalyzed_info{$k}{users}} > 0) { print $fh qq{
}; my $idx = 1; foreach my $u (sort {$normalyzed_info{$k}{users}{$b}{duration} <=> $normalyzed_info{$k}{users}{$a}{duration}} keys %{$normalyzed_info{$k}{users}}) { if ($normalyzed_info{$k}{users}{$u}{duration} > 0) { my $details = "[ User: $u"; $details .= " - Total duration: ".&convert_time($normalyzed_info{$k}{users}{$u}{duration}); $details .= " - Times executed: $normalyzed_info{$k}{users}{$u}{count}"; $details .= " ]\n"; print $fh qq{
$details
}; $idx++; } } print $fh qq{

}; } if (scalar keys %{$normalyzed_info{$k}{apps}} > 0) { print $fh qq{
}; my $idx = 1; foreach my $u (sort {$normalyzed_info{$k}{apps}{$b}{duration} <=> $normalyzed_info{$k}{apps}{$a}{duration}} keys %{$normalyzed_info{$k}{apps}}) { if ($normalyzed_info{$k}{apps}{$u}{duration} > 0) { my $details = "[ Application: $u"; $details .= " - Total duration: ".&convert_time($normalyzed_info{$k}{apps}{$u}{duration}); $details .= " - Times executed: $normalyzed_info{$k}{apps}{$u}{count}"; $details .= " ]\n"; print $fh qq{
$details
}; $idx++; } } print $fh qq{

}; } print $fh qq{
}; my $idx = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { last if ($idx > $sample); my $details = "Date: $normalyzed_info{$k}{samples}{$d}{date}\n"; $details .= "Duration: " . &convert_time($d) . "\n"; $details .= "Database: $normalyzed_info{$k}{samples}{$d}{db}\n" if ($normalyzed_info{$k}{samples}{$d}{db}); $details .= "User: $normalyzed_info{$k}{samples}{$d}{user}\n" if ($normalyzed_info{$k}{samples}{$d}{user}); $details .= "Remote: $normalyzed_info{$k}{samples}{$d}{remote}\n" if ($normalyzed_info{$k}{samples}{$d}{remote}); $details .= "Application: $normalyzed_info{$k}{samples}{$d}{app}\n" if ($normalyzed_info{$k}{samples}{$d}{app}); $details .= "Bind query: yes\n" if ($normalyzed_info{$k}{samples}{$d}{bind}); my $explain = ''; if ($normalyzed_info{$k}{samples}{$d}{plan}) { my $url = $EXPLAIN_URL . url_escape($normalyzed_info{$k}{samples}{$d}{plan}); $explain = "\n
" . $normalyzed_info{$k}{samples}{$d}{plan} . "
"; } $query = &highlight_code($normalyzed_info{$k}{samples}{$d}{query}); my $md5 = ''; $md5 = 'md5: ' . md5_hex($normalyzed_info{$k}{samples}{$d}{query}) if ($enable_checksum); print $fh qq{
$query
$md5
$details
$explain }; $idx++; } print $fh qq{

$NODATA
}; } sub print_slowest_queries { print $fh qq{

Normalized slowest queries (N)

}; my $rank = 1; my $found = 0; foreach my $k (sort {$normalyzed_info{$b}{average} <=> $normalyzed_info{$a}{average}} keys %normalyzed_info) { next if (!$k || !$normalyzed_info{$k}{count} || !exists $normalyzed_info{$k}{duration}); last if ($rank > $top); $found++; $normalyzed_info{$k}{average} = $normalyzed_info{$k}{duration} / $normalyzed_info{$k}{count}; my $duration = &convert_time($normalyzed_info{$k}{duration}); my $count = &comma_numbers($normalyzed_info{$k}{count}); my $min = &convert_time($normalyzed_info{$k}{min}); my $max = &convert_time($normalyzed_info{$k}{max}); my $avg = &convert_time($normalyzed_info{$k}{average}); my $query = &highlight_code($k); my $md5 = ''; $md5 = 'md5: ' . md5_hex($k) if ($enable_checksum); my $details = ''; my %hourly_count = (); my %hourly_duration = (); my $days = 0; foreach my $d (sort keys %{$normalyzed_info{$k}{chronos}}) { my $c = 1; $d =~ /^\d{4}(\d{2})(\d{2})$/; $days++; my $zday = "$abbr_month{$1} $2"; foreach my $h (sort keys %{$normalyzed_info{$k}{chronos}{$d}}) { $normalyzed_info{$k}{chronos}{$d}{$h}{average} = $normalyzed_info{$k}{chronos}{$d}{$h}{duration} / $normalyzed_info{$k}{chronos}{$d}{$h}{count}; $details .= ""; $zday = ""; foreach my $m (sort keys %{$normalyzed_info{$k}{chronos}{$d}{$h}{min}}) { my $rd = &average_per_minutes($m, $histo_avg_minutes); $hourly_count{"$h:$rd"} += $normalyzed_info{$k}{chronos}{$d}{$h}{min}{$m}; $hourly_duration{"$h:$rd"} += ($normalyzed_info{$k}{chronos}{$d}{$h}{min_duration}{$m} || 0); } if ($#histo_avgs > 0) { foreach my $rd (@histo_avgs) { next if (!exists $hourly_count{"$h:$rd"}); $details .= ""; } } } } # Set graph dataset my %graph_data = (); foreach my $h ("00" .. "23") { foreach my $rd (@histo_avgs) { $graph_data{count} .= "['$h:$rd'," . ($hourly_count{"$h:$rd"} || 0) . "],"; $graph_data{duration} .= "['$h:$rd'," . (int($hourly_duration{"$h:$rd"} / ($hourly_count{"$h:$rd"} || 1)) || 0) . "],"; } } $graph_data{count} =~ s/,$//; $graph_data{duration} =~ s/,$//; %hourly_count = (); %hourly_duration = (); my $query_histo = &jqplot_histograph($graphid++, 'normalized_slowest_queries_details_'.$rank, $graph_data{count}, $graph_data{duration}, 'Queries', 'Avg. duration'); my $users_involved = ''; if (scalar keys %{$normalyzed_info{$k}{users}} > 0) { $users_involved = qq{}; } my $apps_involved = ''; if (scalar keys %{$normalyzed_info{$k}{apps}} > 0) { $apps_involved = qq{}; } print $fh qq{ }; $rank++; } if (!$found) { print $fh qq{}; } print $fh qq{
Rank Min duration Max duration Avg duration Times executed Total duration Query
$zday$h" . &comma_numbers($normalyzed_info{$k}{chronos}{$d}{$h}{count}) . "" . &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{duration}) . "" . &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{average}) . "
$zday$h:$rd" . &comma_numbers($hourly_count{"$h:$rd"}) . "" . &convert_time($hourly_duration{"$h:$rd"}) . "" . &convert_time($hourly_duration{"$h:$rd"}/($hourly_count{"$h:$rd"}||1)) . "
$rank $min $max $avg $count

Details

$duration
$query
$md5

  Times Reported Normalized slowest queries #$rank

$query_histo $details
Day Hour Count Duration Avg duration

$users_involved

}; if (scalar keys %{$normalyzed_info{$k}{users}} > 0) { print $fh qq{
}; my $idx = 1; foreach my $u (sort {$normalyzed_info{$k}{users}{$b}{duration} <=> $normalyzed_info{$k}{users}{$a}{duration}} keys %{$normalyzed_info{$k}{users}}) { if ($normalyzed_info{$k}{users}{$u}{duration} > 0) { my $details = "[ User: $u"; $details .= " - Total duration: ".&convert_time($normalyzed_info{$k}{users}{$u}{duration}); $details .= " - Times executed: $normalyzed_info{$k}{users}{$u}{count}"; $details .= " ]\n"; print $fh qq{
$details
}; $idx++; } } print $fh qq{

}; } if (scalar keys %{$normalyzed_info{$k}{apps}} > 0) { print $fh qq{
}; my $idx = 1; foreach my $u (sort {$normalyzed_info{$k}{apps}{$b}{duration} <=> $normalyzed_info{$k}{apps}{$a}{duration}} keys %{$normalyzed_info{$k}{apps}}) { if ($normalyzed_info{$k}{apps}{$u}{duration} > 0) { my $details = "[ Application: $u"; $details .= " - Total duration: ".&convert_time($normalyzed_info{$k}{apps}{$u}{duration}); $details .= " - Times executed: $normalyzed_info{$k}{apps}{$u}{count}"; $details .= " ]\n"; print $fh qq{
$details
}; $idx++; } } print $fh qq{

}; } print $fh qq{
}; my $idx = 1; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { last if ($idx > $sample); my $details = "Date: $normalyzed_info{$k}{samples}{$d}{date}\n"; $details .= "Duration: " . &convert_time($d) . "\n"; $details .= "Database: $normalyzed_info{$k}{samples}{$d}{db}\n" if ($normalyzed_info{$k}{samples}{$d}{db}); $details .= "User: $normalyzed_info{$k}{samples}{$d}{user}\n" if ($normalyzed_info{$k}{samples}{$d}{user}); $details .= "Remote: $normalyzed_info{$k}{samples}{$d}{remote}\n" if ($normalyzed_info{$k}{samples}{$d}{remote}); $details .= "Application: $normalyzed_info{$k}{samples}{$d}{app}\n" if ($normalyzed_info{$k}{samples}{$d}{app}); $details .= "Bind query: yes\n" if ($normalyzed_info{$k}{samples}{$d}{bind}); my $explain = ''; if ($normalyzed_info{$k}{samples}{$d}{plan}) { my $url = $EXPLAIN_URL . url_escape($normalyzed_info{$k}{samples}{$d}{plan}); $explain = "\n
" . $normalyzed_info{$k}{samples}{$d}{plan} . "
\n"; } $query = &highlight_code($normalyzed_info{$k}{samples}{$d}{query}); my $md5 = ''; $md5 = 'md5: ' . md5_hex($normalyzed_info{$k}{samples}{$d}{query}) if ($enable_checksum); print $fh qq{
$query
$md5
$details
$explain }; $idx++; } print $fh qq{

$NODATA
}; } sub dump_as_html { my $uri = shift; # Dump the html header &html_header($uri); if (!$error_only) { if (!$pgbouncer_only) { # Overall statistics print $fh qq{
  • }; &print_overall_statistics(); } # Set graphs limits $overall_stat{'first_log_ts'} =~ /^(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/; $t_min = timegm_nocheck(0, $5, $4, $3, $2 - 1, $1) * 1000; $t_min += ($timezone*1000); $t_min -= ($avg_minutes * 60000); $overall_stat{'last_log_ts'} =~ /^(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/; $t_max = timegm_nocheck(59, $5, $4, $3, $2 - 1, $1) * 1000; $t_max += ($timezone*1000); $t_max += ($avg_minutes * 60000); if (!$disable_hourly && !$pgbouncer_only) { # Build graphs based on hourly stat &compute_query_graphs(); # Show global SQL traffic &print_sql_traffic(); # Show hourly statistics &print_general_activity(); } if (!$disable_connection && !$pgbouncer_only) { print $fh qq{
  • Connections

    }; # Draw connections information &print_established_connection() if (!$disable_hourly); # Show per database/user connections &print_database_connection(); # Show per user connections &print_user_connection(); # Show per client ip connections &print_host_connection(); } # Show session per database statistics if (!$disable_session && !$pgbouncer_only) { print $fh qq{
  • Sessions

    }; # Show number of simultaneous sessions &print_simultaneous_session(); # Show histogram for session times &print_histogram_session_times(); # Show per database sessions &print_database_session(); # Show per user sessions &print_user_session(); # Show per host sessions &print_host_session(); # Show per application sessions &print_app_session(); } # Display checkpoint and temporary files report if (!$disable_checkpoint && !$pgbouncer_only) { print $fh qq{
  • }; &print_checkpoint(); } if (!$disable_temporary && !$pgbouncer_only) { print $fh qq{
  • }; # Show temporary files detailed information &print_temporary_file(); # Show information about queries generating temporary files &print_tempfile_report(); } if (!$disable_autovacuum && !$pgbouncer_only) { print $fh qq{
  • }; # Show detailed vacuum/analyse information &print_vacuum(); } if (!$disable_lock && !$pgbouncer_only) { print $fh qq{
  • }; # Lock stats per type &print_lock_type(); # Show lock wait detailed information &print_lock_queries_report(); } if (!$disable_query && !$pgbouncer_only) { print $fh qq{
  • }; # INSERT/DELETE/UPDATE/SELECT distribution if (!$disable_type) { &print_query_type(); # Show requests per database &print_query_per_database(); # Show requests per user &print_query_per_user(); # Show requests per host &print_query_per_host(); # Show requests per application &print_query_per_application(); # Show cancelled queries detailed information &print_cancelled_queries(); # Show information about cancelled queries &print_cancelled_report(); } print $fh qq{
  • }; # Show histogram for query times &print_histogram_query_times(); # Show top information &print_slowest_individual_queries(); # Show queries that took up the most time &print_time_consuming(); # Show most frequent queries &print_most_frequent(); # Print normalized slowest queries &print_slowest_queries } # Show pgbouncer sessions and connections statistics if (exists $pgb_overall_stat{peak}) { # Build pgbouncer graph based on hourly stats &compute_pgbouncer_graphs(); my $active = ''; $active = ' active-slide' if ($pgbouncer_only); print $fh qq{
  • pgBouncer

    }; # Draw pgbouncer own statistics &print_pgbouncer_stats() if (!$disable_hourly); # Draw connections information &print_established_pgb_connection() if (!$disable_hourly); # Show per database/user connections &print_database_pgb_connection(); # Show per user connections &print_user_pgb_connection(); # Show per client ip connections &print_host_pgb_connection(); # Show number of simultaneous sessions &print_simultaneous_pgb_session(); # Show histogram for session times &print_histogram_pgb_session_times(); # Show per database sessions &print_database_pgb_session(); # Show per user sessions &print_user_pgb_session(); # Show per host sessions &print_host_pgb_session(); # Show most used reserved pool &show_pgb_reserved_pool(); # Show Most Frequent Errors/Events &show_pgb_error_as_html(); } } # Show errors report if (!$disable_error) { if (!$error_only) { print $fh qq{
  • }; } else { print $fh qq{
  • }; } # Show log level distribution &print_log_level(); # Show error code distribution &print_error_code() if (scalar keys %errors_code > 0); # Show Most Frequent Errors/Events &show_error_as_html(); } # Dump the html footer &html_footer(); } sub url_escape { my $toencode = shift; return if (!$toencode); utf8::encode($toencode) if (($] >= 5.008) && utf8::is_utf8($toencode)); if (EBCDIC) { $toencode =~ s/([^a-zA-Z0-9_.~-])/uc sprintf("%%%02x",$E2A[ord($1)])/eg; } else { $toencode =~ s/([^a-zA-Z0-9_.~-])/uc sprintf("%%%02x",ord($1))/eg; } return $toencode; } sub escape_html { $_[0] =~ s/<([\/a-zA-Z][\s\>]*)/\<$1/sg; return $_[0]; } sub print_log_level { my %infos = (); my $ret = 0; # Some messages have seen their log level change during log parsing. # Set the real log level count back foreach my $k (sort {$error_info{$b}{count} <=> $error_info{$a}{count}} keys %error_info) { next if (!$error_info{$k}{count}); if ($error_info{$k}{count} > 1) { for (my $i = 0 ; $i <= $#{$error_info{$k}{date}} ; $i++) { ($error_info{$k}{error}[$i], $ret) = &revert_log_level($error_info{$k}{error}[$i]); if ($ret) { $logs_type{ERROR}--; $logs_type{LOG}++; } } } else { ($error_info{$k}{error}[0], $ret) = &revert_log_level($error_info{$k}{error}[0]); if ($ret) { $logs_type{ERROR}--; $logs_type{LOG}++; } } } # Show log types my $total_logs = 0; foreach my $d (sort keys %logs_type) { $total_logs += $logs_type{$d}; } my $logtype_info = ''; foreach my $d (sort keys %logs_type) { next if (!$logs_type{$d}); $logtype_info .= "$d" . &comma_numbers($logs_type{$d}) . "" . sprintf("%0.2f", ($logs_type{$d} * 100) / ($total_logs||1)) . "%"; } my %graph_data = (); my %max_events = (); if ($graph) { my @small = (); foreach my $d (sort keys %logs_type) { if ((($logs_type{$d} * 100) / ($total_logs || 1)) > $pie_percentage_limit) { $infos{$d} = $logs_type{$d} || 0; } else { $infos{"Sum log types < $pie_percentage_limit%"} += $logs_type{$d} || 0; push(@small, $d); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum log types < $pie_percentage_limit%"}; delete $infos{"Sum log types < $pie_percentage_limit%"}; } foreach my $l (qw(FATAL WARNING ERROR PANIC)) { $max_events{$l} = 0; } foreach my $tm (sort {$a <=> $b} keys %per_minute_info) { $tm =~ /(\d{4})(\d{2})(\d{2})/; my $y = $1 - 1900; my $mo = $2 - 1; my $d = $3; foreach my $h ("00" .. "23") { next if (!exists $per_minute_info{$tm}{$h}); my %e_dataavg = (); foreach my $m ("00" .. "59") { next if (!exists $per_minute_info{$tm}{$h}{$m}); my $rd = &average_per_minutes($m, $avg_minutes); if (exists $per_minute_info{$tm}{$h}{$m}{log_level}) { # Average per minute foreach my $l (qw(FATAL WARNING ERROR PANIC)) { $e_dataavg{$l}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{log_level}{$l} || 0); $max_events{$l} += ($per_minute_info{$tm}{$h}{$m}{log_level}{$l} || 0); } delete $per_minute_info{$tm}{$h}{$m}{log_level}; } } foreach my $rd (@avgs) { my $t = timegm_nocheck(0, $rd, $h, $d, $mo, $y) * 1000; $t += ($timezone*1000); next if ($t < $t_min); last if ($t > $t_max); if (scalar keys %e_dataavg> 0) { foreach my $l (qw(FATAL ERROR PANIC WARNING)) { $graph_data{$l} .= "[$t, " . ($e_dataavg{$l}{"$rd"} || 0) . "],"; } } } } } foreach (keys %graph_data) { $graph_data{$_} =~ s/,$//; } } $drawn_graphs{'eventspersecond_graph'} = &jqplot_linegraph( $graphid++, 'eventspersecond_graph', $graph_data{'PANIC'}, $graph_data{FATAL}, $graph_data{'ERROR'}, 'Errors per ' . $avg_minutes . ' minutes', 'Errors per ' . $avg_minutes . 
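# As a rough illustration (timestamps and counts are made up), each per-level
# series assembled above is a plain jqplot data string of
# "[epoch-milliseconds, count]" pairs joined by commas, with the trailing
# comma stripped before it is passed to jqplot_linegraph(). Two consecutive
# $avg_minutes buckets holding 4 and 0 errors would look like:
#   "[1501113600000, 4],[1501113900000, 0]"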
' minutes', 'PANIC', 'FATAL', 'ERROR', '', $graph_data{'WARNING'}, 'WARNING' ); $drawn_graphs{logstype_graph} = &jqplot_piegraph($graphid++, 'graph_logstype', 'Logs per type', %infos); if (!$total_logs) { $logtype_info = qq{$NODATA}; } $logs_type{ERROR} ||= 0; $logs_type{FATAL} ||= 0; $total_logs = &comma_numbers($total_logs); print $fh qq{

    Events

    Log levels

    Key values

    • $total_logs Log entries
    $drawn_graphs{logstype_graph}
    $logtype_info
    Type Count Percentage

    Events distribution

    Key values

    • $max_events{PANIC} PANIC entries
    • $max_events{FATAL} FATAL entries
    • $max_events{ERROR} ERROR entries
    • $max_events{WARNING} WARNING entries
    $drawn_graphs{'eventspersecond_graph'}
    }; delete $drawn_graphs{logstype_graph}; delete $drawn_graphs{'eventspersecond_graph'}; } sub print_error_code { my %infos = (); my %class_error = (); my $ret = 0; my $total_logs = 0; foreach my $d (sort keys %errors_code) { $total_logs += $errors_code{$d}; $d =~ /^(..)/; $class_error{$1} += $errors_code{$d}; } my $errorclass_info = ''; foreach my $d (sort keys %class_error) { next if (!$class_error{$d}); $errorclass_info .= "$CLASS_ERROR_CODE{$d}$d" . &comma_numbers($class_error{$d}) . "" . sprintf("%0.2f", ($class_error{$d} * 100) / ($total_logs||1)) . "%"; } my %graph_data = (); my %max_events = (); my $most_present = ''; if ($graph) { my @small = (); foreach my $d (sort { $class_error{$b} <=> $class_error{$a} } keys %class_error) { $most_present = $CLASS_ERROR_CODE{$d} if (!$most_present); if ((($class_error{$d} * 100) / ($total_logs || 1)) > $pie_percentage_limit) { $infos{$CLASS_ERROR_CODE{$d}} = $class_error{$d} || 0; } else { $infos{"Sum error classes < $pie_percentage_limit%"} += $class_error{$d} || 0; push(@small, $CLASS_ERROR_CODE{$d}); } } if ($#small == 0) { $infos{$small[0]} = $infos{"Sum error classes < $pie_percentage_limit%"}; delete $infos{"Sum error classes < $pie_percentage_limit%"}; } } $drawn_graphs{errorclass_graph} = &jqplot_piegraph($graphid++, 'graph_errorclass', 'Error class distribution', %infos); if (!$total_logs) { $errorclass_info = qq{$NODATA}; } $total_logs = &comma_numbers($total_logs); print $fh qq{

    Error class distribution

    Key values

    • $total_logs Errors count
    • $most_present Most frequent error class
    $drawn_graphs{errorclass_graph}
    $errorclass_info
    Class Code Count Percentage
    }; delete $drawn_graphs{errorclass_graph}; } sub show_error_as_html { my $main_error = 0; my $total = 0; foreach my $k (sort {$error_info{$b}{count} <=> $error_info{$a}{count}} keys %error_info) { next if (!$error_info{$k}{count}); $main_error = &comma_numbers($error_info{$k}{count}) if (!$main_error); $total += $error_info{$k}{count}; } $total = &comma_numbers($total); print $fh qq{

    Most Frequent Errors/Events

    Key values

    • $main_error Max number of times the same event was reported
    • $total Total events found
    }; my $rank = 1; foreach my $k (sort {$error_info{$b}{count} <=> $error_info{$a}{count}} keys %error_info) { next if (!$error_info{$k}{count}); my $count = &comma_numbers($error_info{$k}{count}); my ($msg, $ret) = &revert_log_level($k); my $error_level_class = 'text-danger'; if ($msg =~ /^WARNING: /) { $error_level_class = 'text-warning'; } elsif ($msg =~ /^LOG: /) { $error_level_class = 'text-success'; } elsif ($msg =~ /^HINT: /) { $error_level_class = 'text-info'; } elsif ($msg =~ /^FATAL: /) { $error_level_class = 'text-fatal'; } elsif ($msg =~ /^PANIC: /) { $error_level_class = 'text-panic'; } my $details = ''; my %hourly_count = (); my $days = 0; foreach my $d (sort keys %{$error_info{$k}{chronos}}) { my $c = 1; $d =~ /^\d{4}(\d{2})(\d{2})$/; $days++; my $zday = "$abbr_month{$1} $2"; foreach my $h (sort keys %{$error_info{$k}{chronos}{$d}}) { $details .= ""; $zday = ""; foreach my $m (sort keys %{$error_info{$k}{chronos}{$d}{$h}{min}}) { my $rd = &average_per_minutes($m, $histo_avg_minutes); $hourly_count{"$h:$rd"} += $error_info{$k}{chronos}{$d}{$h}{min}{$m}; } if ($#histo_avgs > 0) { foreach my $rd (@histo_avgs) { next if (!exists $hourly_count{"$h:$rd"}); $details .= ""; } } } } # Set graph dataset my %graph_data = (); foreach my $h ("00" .. "23") { foreach my $rd (@histo_avgs) { $graph_data{count} .= "['$h:$rd'," . ($hourly_count{"$h:$rd"} || 0) . "],"; } } $graph_data{count} =~ s/,$//; %hourly_count = (); my $error_histo = &jqplot_histograph($graphid++, 'most_frequent_error_'.$rank, $graph_data{count}, '', 'Events', ''); # Escape HTML code in error message $msg = &escape_html($msg); print $fh qq{ }; $rank++; } if (scalar keys %error_info == 0) { print $fh qq{}; } print $fh qq{
    Rank Times reported Error
    $zday$h" . &comma_numbers($error_info{$k}{chronos}{$d}{$h}{count}) . "
    $zday$h:$rd" . &comma_numbers($hourly_count{"$h:$rd"}) . "
    $rank $count

    Details

    $msg

    Times Reported Most Frequent Error / Event #$rank

    $error_histo $details
    Day Hour Count

    }; print $fh qq{

    } if (($sample > 0) && ($#{$error_info{$k}{date}} >= 0)); print $fh qq{
    }; for (my $i = 0 ; $i <= $#{$error_info{$k}{date}} ; $i++) { last if (($sample > 0) && ($i == $sample)); # Escape HTML code in error message my $message = &escape_html($error_info{$k}{error}[$i]); my $details = "Date: " . $error_info{$k}{date}[$i] . "\n"; my $info = ''; if ($error_info{$k}{detail}[$i]) { $info .= "Detail: " . &escape_html($error_info{$k}{detail}[$i]) . "
    "; } if ($error_info{$k}{context}[$i]) { $info .= "Context: " . &escape_html($error_info{$k}{context}[$i]) . "
    "; } if ($error_info{$k}{hint}[$i]) { $info .= "Hint: " . &escape_html($error_info{$k}{hint}[$i]) . "
    "; } if ($error_info{$k}{statement}[$i]) { $info .= "Statement: " . &escape_html($error_info{$k}{statement}[$i]) . "
    "; } if ($error_info{$k}{db}[$i]) { $details .= "Database: $error_info{$k}{db}[$i]\n"; $details .= "Application: $error_info{$k}{app}[$i]\n"; $details .= "User: $error_info{$k}{user}[$i]\n"; $details .= "Remote: $error_info{$k}{remote}[$i]\n"; $details .= "Code: $error_info{$k}{sqlstate}[$i]\n"; } $details =~ s/$//s; print $fh qq{
    $message

    $info

    $details
    }; } print $fh qq{

    $NODATA
    }; } sub show_pgb_error_as_html { my $main_error = 0; my $total = 0; foreach my $k (sort {$pgb_error_info{$b}{count} <=> $pgb_error_info{$a}{count}} keys %pgb_error_info) { next if (!$pgb_error_info{$k}{count}); $main_error = &comma_numbers($pgb_error_info{$k}{count}) if (!$main_error); $total += $pgb_error_info{$k}{count}; } $total = &comma_numbers($total); print $fh qq{

    Most Frequent Errors/Events

    Key values

    • $main_error Max number of times the same event was reported
    • $total Total events found
    }; my $rank = 1; foreach my $k (sort {$pgb_error_info{$b}{count} <=> $pgb_error_info{$a}{count}} keys %pgb_error_info) { next if (!$pgb_error_info{$k}{count}); my $count = &comma_numbers($pgb_error_info{$k}{count}); my $msg = $k; my $error_level_class = 'text-danger'; if ($msg =~ /^WARNING: /) { $error_level_class = 'text-warning'; } elsif ($msg =~ /^LOG: /) { $error_level_class = 'text-success'; } elsif ($msg =~ /^FATAL: /) { $error_level_class = 'text-fatal'; } elsif ($msg =~ /^PANIC: /) { $error_level_class = 'text-panic'; } my $details = ''; my %hourly_count = (); my $days = 0; foreach my $d (sort keys %{$pgb_error_info{$k}{chronos}}) { my $c = 1; $d =~ /^\d{4}(\d{2})(\d{2})$/; $days++; my $zday = "$abbr_month{$1} $2"; foreach my $h (sort keys %{$pgb_error_info{$k}{chronos}{$d}}) { $details .= ""; $zday = ""; foreach my $m (sort keys %{$pgb_error_info{$k}{chronos}{$d}{$h}{min}}) { my $rd = &average_per_minutes($m, $histo_avg_minutes); $hourly_count{"$h:$rd"} += $pgb_error_info{$k}{chronos}{$d}{$h}{min}{$m}; } if ($#histo_avgs > 0) { foreach my $rd (@histo_avgs) { next if (!exists $hourly_count{"$h:$rd"}); $details .= ""; } } } } # Set graph dataset my %graph_data = (); foreach my $h ("00" .. "23") { foreach my $rd (@histo_avgs) { $graph_data{count} .= "['$h:$rd'," . ($hourly_count{"$h:$rd"} || 0) . "],"; } } $graph_data{count} =~ s/,$//; %hourly_count = (); my $error_histo = &jqplot_histograph($graphid++, 'pgbmost_frequent_error_'.$rank, $graph_data{count}, '', 'Events', ''); # Escape HTML code in error message $msg = &escape_html($msg); print $fh qq{ }; $rank++; } if (scalar keys %pgb_error_info == 0) { print $fh qq{}; } print $fh qq{
    Rank Times reported Error
    $zday$h" . &comma_numbers($pgb_error_info{$k}{chronos}{$d}{$h}{count}) . "
    $zday$h:$rd" . &comma_numbers($hourly_count{"$h:$rd"}) . "
    $rank $count

    Details

    $msg

    Times Reported Most Frequent Error / Event #$rank

    $error_histo $details
    Day Hour Count

    }; print $fh qq{

    } if (($sample > 0) && ($#{$pgb_error_info{$k}{date}} >= 0)); print $fh qq{
    }; for (my $i = 0 ; $i <= $#{$pgb_error_info{$k}{date}} ; $i++) { last if (($sample > 0) && ($i == $sample)); # Escape HTML code in error message my $message = &escape_html($pgb_error_info{$k}{error}[$i]); my $details = "Date: " . $pgb_error_info{$k}{date}[$i] . "\n"; if ($pgb_error_info{$k}{db}[$i]) { $details .= "Database: $pgb_error_info{$k}{db}[$i] User: $pgb_error_info{$k}{user}[$i] Remote: $pgb_error_info{$k}{remote}[$i]
    "; } print $fh qq{
    $message
    $details
    }; } print $fh qq{

    $NODATA
    }; } sub show_pgb_reserved_pool { my $main_pool = ''; my $main_pool_val = 0; my $total = 0; foreach my $k (sort {$pgb_pool_info{$b}{count} <=> $pgb_pool_info{$a}{count}} keys %pgb_pool_info) { if (!$main_pool || ($pgb_pool_info{$k}{count} > $pgb_pool_info{$main_pool}{count})) { $main_pool = $k; $main_pool_val = &comma_numbers($pgb_pool_info{$k}{count}); } $total += $pgb_pool_info{$k}{count}; } $total = &comma_numbers($total); print $fh qq{

    Most used reserved pools

    Key values

    • $main_pool Most used reserved pool
    • $main_pool_val Times used
    • $total Total times reserved pools were used
    }; my $rank = 1; foreach my $k (sort {$pgb_pool_info{$b}{count} <=> $pgb_pool_info{$a}{count}} keys %pgb_pool_info) { next if (!$pgb_pool_info{$k}{count}); my $count = &comma_numbers($pgb_pool_info{$k}{count}); my $details = ''; my %hourly_count = (); my $days = 0; foreach my $d (sort keys %{$pgb_pool_info{$k}{chronos}}) { my $c = 1; $d =~ /^\d{4}(\d{2})(\d{2})$/; $days++; my $zday = "$abbr_month{$1} $2"; foreach my $h (sort keys %{$pgb_pool_info{$k}{chronos}{$d}}) { $details .= ""; $zday = ""; foreach my $m (sort keys %{$pgb_pool_info{$k}{chronos}{$d}{$h}{min}}) { my $rd = &average_per_minutes($m, $histo_avg_minutes); $hourly_count{"$h:$rd"} += $pgb_pool_info{$k}{chronos}{$d}{$h}{min}{$m}; } if ($#histo_avgs > 0) { foreach my $rd (@histo_avgs) { next if (!exists $hourly_count{"$h:$rd"}); $details .= ""; } } } } # Set graph dataset my %graph_data = (); foreach my $h ("00" .. "23") { foreach my $rd (@histo_avgs) { $graph_data{count} .= "['$h:$rd'," . ($hourly_count{"$h:$rd"} || 0) . "],"; } } $graph_data{count} =~ s/,$//; %hourly_count = (); my $pool_histo = &jqplot_histograph($graphid++, 'pgbmost_used_reserved_pool_'.$rank, $graph_data{count}, '', 'Avg. used', ''); # Escape HTML code in pool message print $fh qq{ }; $rank++; } if (scalar keys %pgb_pool_info == 0) { print $fh qq{}; } print $fh qq{
    Rank Times used Reserved pool
    $zday$h" . &comma_numbers($pgb_pool_info{$k}{chronos}{$d}{$h}{count}) . "
    $zday$h:$rd" . &comma_numbers($hourly_count{"$h:$rd"}) . "
    $rank $count

    Details

    $k

    Times Reported Most used reserved pools #$rank

    $pool_histo $details
    Day Hour Count

    $NODATA
    }; } sub load_stats { my $fd = shift; my %stats = %{ fd_retrieve($fd) }; my %_overall_stat = %{$stats{overall_stat}}; my %_pgb_overall_stat = %{$stats{pgb_overall_stat}}; my %_overall_checkpoint = %{$stats{overall_checkpoint}}; my %_normalyzed_info = %{$stats{normalyzed_info}}; my %_error_info = %{$stats{error_info}}; my %_pgb_error_info = %{$stats{pgb_error_info}}; my %_pgb_pool_info = %{$stats{pgb_pool_info}}; my %_connection_info = %{$stats{connection_info}}; my %_pgb_connection_info = %{$stats{pgb_connection_info}}; my %_database_info = %{$stats{database_info}}; my %_application_info = %{$stats{application_info}}; my %_user_info = %{$stats{user_info}}; my %_host_info = %{$stats{host_info}}; my %_checkpoint_info = %{$stats{checkpoint_info}}; my %_session_info = %{$stats{session_info}}; my %_pgb_session_info = %{$stats{pgb_session_info}}; my %_tempfile_info = %{$stats{tempfile_info}}; my %_cancelled_info = %{$stats{cancelled_info}}; my %_logs_type = %{$stats{logs_type}}; my %_errors_code = %{$stats{errors_code}}; my %_lock_info = %{$stats{lock_info}}; my %_per_minute_info = %{$stats{per_minute_info}}; my %_pgb_per_minute_info = %{$stats{pgb_per_minute_info}}; my @_top_slowest = @{$stats{top_slowest}}; my $_nlines = $stats{nlines}; my $_first_log_timestamp = $stats{first_log_timestamp}; my $_last_log_timestamp = $stats{last_log_timestamp}; my @_log_files = @{$stats{log_files}}; my %_autovacuum_info = %{$stats{autovacuum_info}}; my %_autoanalyze_info = %{$stats{autoanalyze_info}}; my @_top_locked_info = @{$stats{top_locked_info}}; my @_top_tempfile_info = @{$stats{top_tempfile_info}}; my @_top_cancelled_info = @{$stats{top_cancelled_info}}; ### overall_stat ### $overall_stat{queries_number} += $_overall_stat{queries_number}; if ($_overall_stat{'first_log_ts'}) { $overall_stat{'first_log_ts'} = $_overall_stat{'first_log_ts'} if (!$overall_stat{'first_log_ts'} || ($overall_stat{'first_log_ts'} gt $_overall_stat{'first_log_ts'})); } $overall_stat{'last_log_ts'} = $_overall_stat{'last_log_ts'} if not $overall_stat{'last_log_ts'} or $overall_stat{'last_log_ts'} lt $_overall_stat{'last_log_ts'}; if ($_overall_stat{'first_query_ts'}) { $overall_stat{'first_query_ts'} = $_overall_stat{'first_query_ts'} if (!$overall_stat{'first_query_ts'} || ($overall_stat{'first_query_ts'} gt $_overall_stat{'first_query_ts'})); } $overall_stat{'last_query_ts'} = $_overall_stat{'last_query_ts'} if not $overall_stat{'last_query_ts'} or $overall_stat{'last_query_ts'} lt $_overall_stat{'last_query_ts'}; $overall_stat{errors_number} += $_overall_stat{errors_number}; $overall_stat{queries_duration} += $_overall_stat{queries_duration}; foreach my $a (@SQL_ACTION) { $overall_stat{$a} += $_overall_stat{$a} if exists $_overall_stat{$a}; } $overall_checkpoint{checkpoint_warning} += $_overall_checkpoint{checkpoint_warning}; $overall_checkpoint{checkpoint_write} = $_overall_checkpoint{checkpoint_write} if ($_overall_checkpoint{checkpoint_write} > $overall_checkpoint{checkpoint_write}); $overall_checkpoint{checkpoint_sync} = $_overall_checkpoint{checkpoint_sync} if ($_overall_checkpoint{checkpoint_sync} > $overall_checkpoint{checkpoint_sync}); foreach my $k (keys %{$_overall_stat{peak}}) { $overall_stat{peak}{$k}{query} += $_overall_stat{peak}{$k}{query}; $overall_stat{peak}{$k}{select} += $_overall_stat{peak}{$k}{select}; $overall_stat{peak}{$k}{write} += $_overall_stat{peak}{$k}{write}; $overall_stat{peak}{$k}{connection} += $_overall_stat{peak}{$k}{connection}; $overall_stat{peak}{$k}{session} += 
$_overall_stat{peak}{$k}{session}; $overall_stat{peak}{$k}{tempfile_size} += $_overall_stat{peak}{$k}{tempfile_size}; $overall_stat{peak}{$k}{tempfile_count} += $_overall_stat{peak}{$k}{tempfile_count}; $overall_stat{peak}{$k}{cancelled_size} += $_overall_stat{peak}{$k}{cancelled_size}; $overall_stat{peak}{$k}{cancelled_count} += $_overall_stat{peak}{$k}{cancelled_count}; } foreach my $k (keys %{$_overall_stat{histogram}{query_time}}) { $overall_stat{histogram}{query_time}{$k} += $_overall_stat{histogram}{query_time}{$k}; } $overall_stat{histogram}{query_total} += $_overall_stat{histogram}{total}; $overall_stat{histogram}{query_total} += $_overall_stat{histogram}{query_total}; foreach my $k (keys %{$_overall_stat{histogram}{session_time}}) { $overall_stat{histogram}{session_time}{$k} += $_overall_stat{histogram}{session_time}{$k}; } $overall_stat{histogram}{session_total} += $_overall_stat{histogram}{session_total}; foreach my $k ('prepare', 'bind','execute') { $overall_stat{$k} += $_overall_stat{$k}; } foreach my $k (keys %{$_overall_checkpoint{peak}}) { $overall_checkpoint{peak}{$k}{checkpoint_wbuffer} += $_overall_checkpoint{peak}{$k}{checkpoint_wbuffer}; $overall_checkpoint{peak}{$k}{walfile_usage} += $_overall_checkpoint{peak}{$k}{walfile_usage}; } ### pgbouncer related overall stats ### foreach my $k (keys %{$_pgb_overall_stat{peak}}) { $pgb_overall_stat{peak}{$k}{connection} += $_pgb_overall_stat{peak}{$k}{connection}; $pgb_overall_stat{peak}{$k}{session} += $_pgb_overall_stat{peak}{$k}{session}; $pgb_overall_stat{peak}{$k}{t_req} += $_pgb_overall_stat{peak}{$k}{t_req}; $pgb_overall_stat{peak}{$k}{t_inbytes} += $_pgb_overall_stat{peak}{$k}{t_inbytes}; $pgb_overall_stat{peak}{$k}{t_outbytes} += $_pgb_overall_stat{peak}{$k}{t_outbytes}; $pgb_overall_stat{peak}{$k}{t_avgduration} += $_pgb_overall_stat{peak}{$k}{t_avgduration}; } foreach my $k (keys %{$_pgb_overall_stat{histogram}{session_time}}) { $pgb_overall_stat{histogram}{session_time}{$k} += $_pgb_overall_stat{histogram}{session_time}{$k}; } $pgb_overall_stat{histogram}{session_total} += $_pgb_overall_stat{histogram}{session_total}; $pgb_overall_stat{errors_number} += $_pgb_overall_stat{errors_number}; ### Logs level ### foreach my $l (qw(LOG WARNING ERROR FATAL PANIC DETAIL HINT STATEMENT CONTEXT)) { $logs_type{$l} += $_logs_type{$l} if exists $_logs_type{$l}; } ### Errors code ### foreach my $c (keys %_errors_code) { $errors_code{$c} += $_errors_code{$c}; } ### database_info ### foreach my $db (keys %_database_info) { foreach my $k (keys %{ $_database_info{$db} }) { $database_info{$db}{$k} += $_database_info{$db}{$k}; } } ### application_info ### foreach my $app (keys %_application_info) { foreach my $k (keys %{ $_application_info{$app} }) { $application_info{$app}{$k} += $_application_info{$app}{$k}; } } ### user_info ### foreach my $u (keys %_user_info) { foreach my $k (keys %{ $_user_info{$u} }) { $user_info{$u}{$k} += $_user_info{$u}{$k}; } } ### host_info ### foreach my $h (keys %_host_info) { foreach my $k (keys %{ $_host_info{$h} }) { $host_info{$h}{$k} += $_host_info{$h}{$k}; } } ### connection_info ### foreach my $db (keys %{ $_connection_info{database} }) { $connection_info{database}{$db} += $_connection_info{database}{$db}; } foreach my $db (keys %{ $_connection_info{database_user} }) { foreach my $user (keys %{ $_connection_info{database_user}{$db} }) { $connection_info{database_user}{$db}{$user} += $_connection_info{database_user}{$db}{$user}; } } foreach my $user (keys %{ $_connection_info{user} }) { 
$connection_info{user}{$user} += $_connection_info{user}{$user}; } foreach my $host (keys %{ $_connection_info{host} }) { $connection_info{host}{$host} += $_connection_info{host}{$host}; } $connection_info{count} += $_connection_info{count}; foreach my $day (keys %{ $_connection_info{chronos} }) { foreach my $hour (keys %{ $_connection_info{chronos}{$day} }) { $connection_info{chronos}{$day}{$hour}{count} += $_connection_info{chronos}{$day}{$hour}{count} ############################################################################### # May be used in the future to display more detailed information on connection # # foreach my $db (keys %{ $_connection_info{chronos}{$day}{$hour}{database} }) { # $connection_info{chronos}{$day}{$hour}{database}{$db} += $_connection_info{chronos}{$day}{$hour}{database}{$db}; # } # # foreach my $db (keys %{ $_connection_info{chronos}{$day}{$hour}{database_user} }) { # foreach my $user (keys %{ $_connection_info{chronos}{$day}{$hour}{database_user}{$db} }) { # $connection_info{chronos}{$day}{$hour}{database_user}{$db}{$user} += # $_connection_info{chronos}{$day}{$hour}{database_user}{$db}{$user}; # } # } # # foreach my $user (keys %{ $_connection_info{chronos}{$day}{$hour}{user} }) { # $connection_info{chronos}{$day}{$hour}{user}{$user} += # $_connection_info{chronos}{$day}{$hour}{user}{$user}; # } # # foreach my $host (keys %{ $_connection_info{chronos}{$day}{$hour}{host} }) { # $connection_info{chronos}{$day}{$hour}{host}{$host} += # $_connection_info{chronos}{$day}{$hour}{host}{$host}; # } ############################################################################### } } ### pgbouncer connection_info ### foreach my $db (keys %{ $_pgb_connection_info{database} }) { $pgb_connection_info{database}{$db} += $_pgb_connection_info{database}{$db}; } foreach my $db (keys %{ $_pgb_connection_info{database_user} }) { foreach my $user (keys %{ $_pgb_connection_info{database_user}{$db} }) { $pgb_connection_info{database_user}{$db}{$user} += $_pgb_connection_info{database_user}{$db}{$user}; } } foreach my $user (keys %{ $_pgb_connection_info{user} }) { $pgb_connection_info{user}{$user} += $_pgb_connection_info{user}{$user}; } foreach my $host (keys %{ $_pgb_connection_info{host} }) { $pgb_connection_info{host}{$host} += $_pgb_connection_info{host}{$host}; } $pgb_connection_info{count} += $_pgb_connection_info{count}; foreach my $day (keys %{ $_pgb_connection_info{chronos} }) { foreach my $hour (keys %{ $_pgb_connection_info{chronos}{$day} }) { $pgb_connection_info{chronos}{$day}{$hour}{count} += $_pgb_connection_info{chronos}{$day}{$hour}{count} } } ### log_files ### foreach my $f (@_log_files) { push(@log_files, $f) if (!grep(m#^$f$#, @_log_files)); } ### error_info ### foreach my $q (keys %_error_info) { $error_info{$q}{count} += $_error_info{$q}{count}; foreach my $day (keys %{ $_error_info{$q}{chronos} }) { foreach my $hour (keys %{$_error_info{$q}{chronos}{$day}}) { $error_info{$q}{chronos}{$day}{$hour}{count} += $_error_info{$q}{chronos}{$day}{$hour}{count}; foreach my $min (keys %{$_error_info{$q}{chronos}{$day}{$hour}{min}}) { $error_info{$q}{chronos}{$day}{$hour}{min}{$min} += $_error_info{$q}{chronos}{$day}{$hour}{min}{$min}; } } } for (my $i = 0; $i <= $#{$_error_info{$q}{date}}; $i++) { &set_top_error_sample( $q, $_error_info{$q}{date}[$i], $_error_info{$q}{error}[$i], $_error_info{$q}{detail}[$i], $_error_info{$q}{context}[$i], $_error_info{$q}{statement}[$i], $_error_info{$q}{hint}[$i], $_error_info{$q}{db}[$i], $_error_info{$q}{user}[$i], 
$_error_info{$q}{app}[$i], $_error_info{$q}{remote}[$i], $_error_info{$q}{sqlstate}[$i] ); } } ### pgbouncer error_info ### foreach my $q (keys %_pgb_error_info) { $pgb_error_info{$q}{count} += $_pgb_error_info{$q}{count}; foreach my $day (keys %{ $_pgb_error_info{$q}{chronos} }) { foreach my $hour (keys %{$_pgb_error_info{$q}{chronos}{$day}}) { $pgb_error_info{$q}{chronos}{$day}{$hour}{count} += $_pgb_error_info{$q}{chronos}{$day}{$hour}{count}; foreach my $min (keys %{$_pgb_error_info{$q}{chronos}{$day}{$hour}{min}}) { $pgb_error_info{$q}{chronos}{$day}{$hour}{min}{$min} += $_pgb_error_info{$q}{chronos}{$day}{$hour}{min}{$min}; } } } for (my $i = 0; $i <= $#{$_pgb_error_info{$q}{date}}; $i++) { &pgb_set_top_error_sample( $q, $_pgb_error_info{$q}{date}[$i], $_pgb_error_info{$q}{error}[$i], $_pgb_error_info{$q}{db}[$i], $_pgb_error_info{$q}{user}[$i], $_pgb_error_info{$q}{remote}[$i] ); } } ### pgbouncer pool_info ### foreach my $q (keys %_pgb_pool_info) { $pgb_pool_info{$q}{count} += $_pgb_pool_info{$q}{count}; foreach my $day (keys %{ $_pgb_pool_info{$q}{chronos} }) { foreach my $hour (keys %{$_pgb_pool_info{$q}{chronos}{$day}}) { $pgb_pool_info{$q}{chronos}{$day}{$hour}{count} += $_pgb_pool_info{$q}{chronos}{$day}{$hour}{count}; foreach my $min (keys %{$_pgb_pool_info{$q}{chronos}{$day}{$hour}{min}}) { $pgb_pool_info{$q}{chronos}{$day}{$hour}{min}{$min} += $_pgb_pool_info{$q}{chronos}{$day}{$hour}{min}{$min}; } } } } ### per_minute_info ### foreach my $day (keys %_per_minute_info) { foreach my $hour (keys %{ $_per_minute_info{$day} }) { foreach my $min (keys %{ $_per_minute_info{$day}{$hour} }) { $per_minute_info{$day}{$hour}{$min}{connection}{count} += ($_per_minute_info{$day}{$hour}{$min}{connection}{count} || 0); $per_minute_info{$day}{$hour}{$min}{session}{count} += ($_per_minute_info{$day}{$hour}{$min}{session}{count} || 0); $per_minute_info{$day}{$hour}{$min}{query}{count} += ($_per_minute_info{$day}{$hour}{$min}{query}{count} || 0); $per_minute_info{$day}{$hour}{$min}{query}{duration} += $_per_minute_info{$day}{$hour}{$min}{query}{duration}; $per_minute_info{$day}{$hour}{$min}{query}{min} = $_per_minute_info{$day}{$hour}{$min}{query}{min} if (!exists $per_minute_info{$day}{$hour}{$min}{query}{min} || ($per_minute_info{$day}{$hour}{$min}{query}{min} > $_per_minute_info{$day}{$hour}{$min}{query}{min})); $per_minute_info{$day}{$hour}{$min}{query}{max} = $_per_minute_info{$day}{$hour}{$min}{query}{max} if (!exists $per_minute_info{$day}{$hour}{$min}{query}{max} || ($per_minute_info{$day}{$hour}{$min}{query}{max} < $_per_minute_info{$day}{$hour}{$min}{query}{max})); foreach my $sec (keys %{ $_per_minute_info{$day}{$hour}{$min}{connection}{second} }) { $per_minute_info{$day}{$hour}{$min}{connection}{second}{$sec} += ($_per_minute_info{$day}{$hour}{$min}{connection}{second}{$sec} || 0); } foreach my $sec (keys %{ $_per_minute_info{$day}{$hour}{$min}{session}{second} }) { $per_minute_info{$day}{$hour}{$min}{session}{second}{$sec} += ($_per_minute_info{$day}{$hour}{$min}{session}{second}{$sec} || 0); } foreach my $sec (keys %{ $_per_minute_info{$day}{$hour}{$min}{query}{second} }) { $per_minute_info{$day}{$hour}{$min}{query}{second}{$sec} += ($_per_minute_info{$day}{$hour}{$min}{query}{second}{$sec} || 0); } foreach my $action (@SQL_ACTION) { next if (!exists $_per_minute_info{$day}{$hour}{$min}{$action}); $per_minute_info{$day}{$hour}{$min}{$action}{count} += ($_per_minute_info{$day}{$hour}{$min}{$action}{count} || 0); $per_minute_info{$day}{$hour}{$min}{$action}{duration} += 
($_per_minute_info{$day}{$hour}{$min}{$action}{duration} || 0); foreach my $sec (keys %{ $_per_minute_info{$day}{$hour}{$min}{$action}{second} }) { $per_minute_info{$day}{$hour}{$min}{$action}{second}{$sec} += ($_per_minute_info{$day}{$hour}{$min}{$action}{second}{$sec} || 0); } } foreach my $k ('prepare', 'bind','execute') { if (exists $_per_minute_info{$day}{$hour}{$min}{$k}) { $per_minute_info{$day}{$hour}{$min}{$k} += $_per_minute_info{$day}{$hour}{$min}{$k}; } } foreach my $log (keys %{ $_per_minute_info{$day}{$hour}{$min}{log_level} }) { $per_minute_info{$day}{$hour}{$min}{log_level}{$log} += ($_per_minute_info{$day}{$hour}{$min}{log_level}{$log} || 0); } $per_minute_info{$day}{$hour}{$min}{cancelled}{count} += $_per_minute_info{$day}{$hour}{$min}{cancelled}{count} if defined $_per_minute_info{$day}{$hour}{$min}{cancelled}{count}; $per_minute_info{$day}{$hour}{$min}{tempfile}{count} += $_per_minute_info{$day}{$hour}{$min}{tempfile}{count} if defined $_per_minute_info{$day}{$hour}{$min}{tempfile}{count}; $per_minute_info{$day}{$hour}{$min}{tempfile}{size} += $_per_minute_info{$day}{$hour}{$min}{tempfile}{size} if defined $_per_minute_info{$day}{$hour}{$min}{tempfile}{size}; $per_minute_info{$day}{$hour}{$min}{checkpoint}{file_removed} += $_per_minute_info{$day}{$hour}{$min}{checkpoint}{file_removed}; $per_minute_info{$day}{$hour}{$min}{checkpoint}{sync} += $_per_minute_info{$day}{$hour}{$min}{checkpoint}{sync}; $per_minute_info{$day}{$hour}{$min}{checkpoint}{wbuffer} += $_per_minute_info{$day}{$hour}{$min}{checkpoint}{wbuffer}; $per_minute_info{$day}{$hour}{$min}{checkpoint}{file_recycled} += $_per_minute_info{$day}{$hour}{$min}{checkpoint}{file_recycled}; $per_minute_info{$day}{$hour}{$min}{checkpoint}{total} += $_per_minute_info{$day}{$hour}{$min}{checkpoint}{total}; $per_minute_info{$day}{$hour}{$min}{checkpoint}{file_added} += $_per_minute_info{$day}{$hour}{$min}{checkpoint}{file_added}; $per_minute_info{$day}{$hour}{$min}{checkpoint}{write} += $_per_minute_info{$day}{$hour}{$min}{checkpoint}{write}; $per_minute_info{$day}{$hour}{$min}{autovacuum}{count} += $_per_minute_info{$day}{$hour}{$min}{autovacuum}{count}; $per_minute_info{$day}{$hour}{$min}{autoanalyze}{count} += $_per_minute_info{$day}{$hour}{$min}{autoanalyze}{count}; $per_minute_info{$day}{$hour}{$min}{checkpoint}{sync_files} += $_per_minute_info{$day}{$hour}{$min}{checkpoint}{sync_files}; $per_minute_info{$day}{$hour}{$min}{checkpoint}{sync_avg} += $_per_minute_info{$day}{$hour}{$min}{checkpoint}{sync_avg}; $per_minute_info{$day}{$hour}{$min}{checkpoint}{sync_longest} = $_per_minute_info{$day}{$hour}{$min}{checkpoint}{sync_longest} if ($_per_minute_info{$day}{$hour}{$min}{checkpoint}{sync_longest} > $per_minute_info{$day}{$hour}{$min}{checkpoint}{sync_longest}); } } } ### pgbouncer per_minute_info ### foreach my $day (keys %_pgb_per_minute_info) { foreach my $hour (keys %{ $_pgb_per_minute_info{$day} }) { foreach my $min (keys %{ $_pgb_per_minute_info{$day}{$hour} }) { $pgb_per_minute_info{$day}{$hour}{$min}{connection}{count} += ($_pgb_per_minute_info{$day}{$hour}{$min}{connection}{count} || 0); $pgb_per_minute_info{$day}{$hour}{$min}{session}{count} += ($_pgb_per_minute_info{$day}{$hour}{$min}{session}{count} || 0); $pgb_per_minute_info{$day}{$hour}{$min}{t_req} += ($_pgb_per_minute_info{$day}{$hour}{$min}{t_req} || 0); $pgb_per_minute_info{$day}{$hour}{$min}{t_inbytes} += ($_pgb_per_minute_info{$day}{$hour}{$min}{t_inbytes} || 0); $pgb_per_minute_info{$day}{$hour}{$min}{t_outbytes} += 
($_pgb_per_minute_info{$day}{$hour}{$min}{t_outbytes} || 0); $pgb_per_minute_info{$day}{$hour}{$min}{t_avgduration} += ($_pgb_per_minute_info{$day}{$hour}{$min}{t_avgduration} || 0); } } } ### lock_info ### foreach my $lock (keys %_lock_info) { $lock_info{$lock}{count} += $_lock_info{$lock}{count}; foreach my $day (keys %{ $_lock_info{chronos} }) { foreach my $hour (keys %{ $_lock_info{chronos}{$day} }) { $lock_info{chronos}{$day}{$hour}{count} += $_lock_info{chronos}{$day}{$hour}{count}; $lock_info{chronos}{$day}{$hour}{duration} += $_lock_info{chronos}{$day}{$hour}{duration}; } } $lock_info{$lock}{duration} += $_lock_info{$lock}{duration}; foreach my $type (keys %{$_lock_info{$lock}}) { next if $type =~ /^(count|chronos|duration)$/; $lock_info{$lock}{$type}{count} += $_lock_info{$lock}{$type}{count}; $lock_info{$lock}{$type}{duration} += $_lock_info{$lock}{$type}{duration}; } } ### nlines ### $nlines += $_nlines; ### normalyzed_info ### foreach my $stmt (keys %_normalyzed_info) { foreach my $dt (keys %{$_normalyzed_info{$stmt}{samples}} ) { foreach my $k (keys %{$_normalyzed_info{$stmt}{samples}{$dt}} ) { $normalyzed_info{$stmt}{samples}{$dt}{$k} = $_normalyzed_info{$stmt}{samples}{$dt}{$k}; } } # Keep only the top N samples my $i = 1; foreach my $k (sort {$b <=> $a} keys %{$normalyzed_info{$stmt}{samples}}) { if ($i > $sample) { delete $normalyzed_info{$stmt}{samples}{$k}; } $i++; } $normalyzed_info{$stmt}{count} += $_normalyzed_info{$stmt}{count}; # Set min / max duration for this query if (!exists $normalyzed_info{$stmt}{min} || ($normalyzed_info{$stmt}{min} > $_normalyzed_info{$stmt}{min})) { $normalyzed_info{$stmt}{min} = $_normalyzed_info{$stmt}{min}; } if (!exists $normalyzed_info{$stmt}{max} || ($normalyzed_info{$stmt}{max} < $_normalyzed_info{$stmt}{max})) { $normalyzed_info{$stmt}{max} = $_normalyzed_info{$stmt}{max}; } foreach my $day (keys %{$_normalyzed_info{$stmt}{chronos}} ) { foreach my $hour (keys %{$_normalyzed_info{$stmt}{chronos}{$day}} ) { $normalyzed_info{$stmt}{chronos}{$day}{$hour}{count} += $_normalyzed_info{$stmt}{chronos}{$day}{$hour}{count}; $normalyzed_info{$stmt}{chronos}{$day}{$hour}{duration} += $_normalyzed_info{$stmt}{chronos}{$day}{$hour}{duration}; foreach my $min (keys %{$_normalyzed_info{$stmt}{chronos}{$day}{$hour}{min}} ) { $normalyzed_info{$stmt}{chronos}{$day}{$hour}{min}{$min} += $_normalyzed_info{$stmt}{chronos}{$day}{$hour}{min}{$min}; } foreach my $min (keys %{$_normalyzed_info{$stmt}{chronos}{$day}{$hour}{min_duration}} ) { $normalyzed_info{$stmt}{chronos}{$day}{$hour}{min_duration}{$min} += $_normalyzed_info{$stmt}{chronos}{$day}{$hour}{min_duration}{$min}; } } } $normalyzed_info{$stmt}{duration} += $_normalyzed_info{$stmt}{duration}; if (exists $_normalyzed_info{$stmt}{locks}) { $normalyzed_info{$stmt}{locks}{count} += $_normalyzed_info{$stmt}{locks}{count}; $normalyzed_info{$stmt}{locks}{wait} += $_normalyzed_info{$stmt}{locks}{wait}; if (!exists $normalyzed_info{$stmt}{locks}{minwait} || ($normalyzed_info{$stmt}{locks}{minwait} > $_normalyzed_info{$stmt}{locks}{minwait})) { $normalyzed_info{$stmt}{locks}{minwait} = $_normalyzed_info{$stmt}{locks}{minwait}; } if (!exists $normalyzed_info{$stmt}{locks}{maxwait} || ($normalyzed_info{$stmt}{locks}{maxwait} < $_normalyzed_info{$stmt}{locks}{maxwait})) { $normalyzed_info{$stmt}{locks}{maxwait} = $_normalyzed_info{$stmt}{locks}{maxwait}; } } if (exists $_normalyzed_info{$stmt}{tempfiles}) { $normalyzed_info{$stmt}{tempfiles}{count} += $_normalyzed_info{$stmt}{tempfiles}{count}; 
$normalyzed_info{$stmt}{tempfiles}{size} += $_normalyzed_info{$stmt}{tempfiles}{size}; if (!exists $normalyzed_info{$stmt}{tempfiles}{minsize} || ($normalyzed_info{$stmt}{tempfiles}{minsize} > $_normalyzed_info{$stmt}{tempfiles}{minsize})) { $normalyzed_info{$stmt}{tempfiles}{minsize} = $_normalyzed_info{$stmt}{tempfiles}{minsize}; } if (!exists $normalyzed_info{$stmt}{tempfiles}{maxsize} || ($normalyzed_info{$stmt}{tempfiles}{maxsize} < $_normalyzed_info{$stmt}{tempfiles}{maxsize})) { $normalyzed_info{$stmt}{tempfiles}{maxsize} = $_normalyzed_info{$stmt}{tempfiles}{maxsize}; } } if (exists $_normalyzed_info{$stmt}{cancelled}) { $normalyzed_info{$stmt}{cancelled}{count} += $_normalyzed_info{$stmt}{cancelled}{count}; } foreach my $u (keys %{$_normalyzed_info{$stmt}{users}} ) { foreach my $k (keys %{$_normalyzed_info{$stmt}{users}{$u}} ) { $normalyzed_info{$stmt}{users}{$u}{$k} += $_normalyzed_info{$stmt}{users}{$u}{$k}; } } foreach my $u (keys %{$_normalyzed_info{$stmt}{apps}} ) { foreach my $k (keys %{$_normalyzed_info{$stmt}{apps}{$u}} ) { $normalyzed_info{$stmt}{apps}{$u}{$k} += $_normalyzed_info{$stmt}{apps}{$u}{$k}; } } } ### session_info ### foreach my $db (keys %{ $_session_info{database}}) { $session_info{database}{$db}{count} += $_session_info{database}{$db}{count}; $session_info{database}{$db}{duration} += $_session_info{database}{$db}{duration}; } $session_info{count} += $_session_info{count}; foreach my $day (keys %{ $_session_info{chronos}}) { foreach my $hour (keys %{ $_session_info{chronos}{$day}}) { $session_info{chronos}{$day}{$hour}{count} += $_session_info{chronos}{$day}{$hour}{count}; $session_info{chronos}{$day}{$hour}{duration} += $_session_info{chronos}{$day}{$hour}{duration}; } } foreach my $user (keys %{ $_session_info{user}}) { $session_info{user}{$user}{count} += $_session_info{user}{$user}{count}; $session_info{user}{$user}{duration} += $_session_info{user}{$user}{duration}; } $session_info{duration} += $_session_info{duration}; foreach my $host (keys %{ $_session_info{host}}) { $session_info{host}{$host}{count} += $_session_info{host}{$host}{count}; $session_info{host}{$host}{duration} += $_session_info{host}{$host}{duration}; } foreach my $app (keys %{ $_session_info{app}}) { $session_info{app}{$app}{count} += $_session_info{app}{$app}{count}; $session_info{app}{$app}{duration} += $_session_info{app}{$app}{duration}; } ### pgbouncer session_info ### foreach my $db (keys %{ $_pgb_session_info{database}}) { $pgb_session_info{database}{$db}{count} += $_pgb_session_info{database}{$db}{count}; $pgb_session_info{database}{$db}{duration} += $_pgb_session_info{database}{$db}{duration}; } $pgb_session_info{count} += $_pgb_session_info{count}; foreach my $day (keys %{ $_pgb_session_info{chronos}}) { foreach my $hour (keys %{ $_pgb_session_info{chronos}{$day}}) { $pgb_session_info{chronos}{$day}{$hour}{count} += $_pgb_session_info{chronos}{$day}{$hour}{count}; $pgb_session_info{chronos}{$day}{$hour}{duration} += $_pgb_session_info{chronos}{$day}{$hour}{duration}; } } foreach my $user (keys %{ $_pgb_session_info{user}}) { $pgb_session_info{user}{$user}{count} += $_pgb_session_info{user}{$user}{count}; $pgb_session_info{user}{$user}{duration} += $_pgb_session_info{user}{$user}{duration}; } $pgb_session_info{duration} += $_pgb_session_info{duration}; foreach my $host (keys %{ $_pgb_session_info{host}}) { $pgb_session_info{host}{$host}{count} += $_pgb_session_info{host}{$host}{count}; $pgb_session_info{host}{$host}{duration} += $_pgb_session_info{host}{$host}{duration}; } ### 
tempfile_info ### $tempfile_info{count} += $_tempfile_info{count} if defined $_tempfile_info{count}; $tempfile_info{size} += $_tempfile_info{size} if defined $_tempfile_info{size}; $tempfile_info{maxsize} = $_tempfile_info{maxsize} if defined $_tempfile_info{maxsize} and ( not defined $tempfile_info{maxsize} or $tempfile_info{maxsize} < $_tempfile_info{maxsize} ); ### top_slowest ### my @tmp_top_slowest = sort {$b->[0] <=> $a->[0]} (@top_slowest, @_top_slowest); @top_slowest = (); for (my $i = 0; $i <= $#tmp_top_slowest; $i++) { push(@top_slowest, $tmp_top_slowest[$i]); last if ($i == $end_top); } ### top_locked ### my @tmp_top_locked_info = sort {$b->[0] <=> $a->[0]} (@top_locked_info, @_top_locked_info); @top_locked_info = (); for (my $i = 0; $i <= $#tmp_top_locked_info; $i++) { push(@top_locked_info, $tmp_top_locked_info[$i]); last if ($i == $end_top); } ### top_tempfile ### my @tmp_top_tempfile_info = sort {$b->[0] <=> $a->[0]} (@top_tempfile_info, @_top_tempfile_info); @top_tempfile_info = (); for (my $i = 0; $i <= $#tmp_top_tempfile_info; $i++) { push(@top_tempfile_info, $tmp_top_tempfile_info[$i]); last if ($i == $end_top); } ### checkpoint_info ### $checkpoint_info{file_removed} += $_checkpoint_info{file_removed}; $checkpoint_info{sync} += $_checkpoint_info{sync}; $checkpoint_info{wbuffer} += $_checkpoint_info{wbuffer}; $checkpoint_info{file_recycled} += $_checkpoint_info{file_recycled}; $checkpoint_info{total} += $_checkpoint_info{total}; $checkpoint_info{file_added} += $_checkpoint_info{file_added}; $checkpoint_info{write} += $_checkpoint_info{write}; #### Autovacuum info #### $autovacuum_info{count} += $_autovacuum_info{count}; foreach my $day (keys %{ $_autovacuum_info{chronos} }) { foreach my $hour (keys %{ $_autovacuum_info{chronos}{$day} }) { $autovacuum_info{chronos}{$day}{$hour}{count} += $_autovacuum_info{chronos}{$day}{$hour}{count}; } } foreach my $table (keys %{ $_autovacuum_info{tables} }) { $autovacuum_info{tables}{$table}{vacuums} += $_autovacuum_info{tables}{$table}{vacuums}; $autovacuum_info{tables}{$table}{idxscans} += $_autovacuum_info{tables}{$table}{idxscans}; $autovacuum_info{tables}{$table}{tuples}{removed} += $_autovacuum_info{tables}{$table}{tuples}{removed}; $autovacuum_info{tables}{$table}{pages}{removed} += $_autovacuum_info{tables}{$table}{pages}{removed}; } if ($_autovacuum_info{peak}{system_usage}{elapsed} > $autovacuum_info{peak}{system_usage}{elapsed}) { $autovacuum_info{peak}{system_usage}{elapsed} = $_autovacuum_info{peak}{system_usage}{elapsed}; $autovacuum_info{peak}{system_usage}{table} = $_autovacuum_info{peak}{system_usage}{table}; $autovacuum_info{peak}{system_usage}{date} = $_autovacuum_info{peak}{system_usage}{date}; } #### Autoanalyze info #### $autoanalyze_info{count} += $_autoanalyze_info{count}; foreach my $day (keys %{ $_autoanalyze_info{chronos} }) { foreach my $hour (keys %{ $_autoanalyze_info{chronos}{$day} }) { $autoanalyze_info{chronos}{$day}{$hour}{count} += $_autoanalyze_info{chronos}{$day}{$hour}{count}; } } foreach my $table (keys %{ $_autoanalyze_info{tables} }) { $autoanalyze_info{tables}{$table}{analyzes} += $_autoanalyze_info{tables}{$table}{analyzes}; } if ($_autoanalyze_info{peak}{system_usage}{elapsed} > $autoanalyze_info{peak}{system_usage}{elapsed}) { $autoanalyze_info{peak}{system_usage}{elapsed} = $_autoanalyze_info{peak}{system_usage}{elapsed}; $autoanalyze_info{peak}{system_usage}{table} = $_autoanalyze_info{peak}{system_usage}{table}; $autoanalyze_info{peak}{system_usage}{date} = 
$_autoanalyze_info{peak}{system_usage}{date}; } return; } # Function used to dump all relevant objects in memory to a single binary file sub dump_as_binary { my $lfh = shift(); store_fd({ 'overall_stat' => \%overall_stat, 'pgb_overall_stat' => \%pgb_overall_stat, 'overall_checkpoint' => \%overall_checkpoint, 'normalyzed_info' => \%normalyzed_info, 'error_info' => \%error_info, 'pgb_error_info' => \%pgb_error_info, 'pgb_pool_info' => \%pgb_pool_info, 'connection_info' => \%connection_info, 'pgb_connection_info' => \%pgb_connection_info, 'database_info' => \%database_info, 'application_info' => \%application_info, 'user_info' => \%user_info, 'host_info' => \%host_info, 'checkpoint_info' => \%checkpoint_info, 'session_info' => \%session_info, 'pgb_session_info' => \%pgb_session_info, 'tempfile_info' => \%tempfile_info, 'logs_type' => \%logs_type, 'lock_info' => \%lock_info, 'per_minute_info' => \%per_minute_info, 'pgb_per_minute_info' => \%pgb_per_minute_info, 'top_slowest' => \@top_slowest, 'nlines' => $nlines, 'log_files' => \@log_files, 'autovacuum_info' => \%autovacuum_info, 'autoanalyze_info' => \%autoanalyze_info, 'top_tempfile_info' => \@top_tempfile_info, 'top_locked_info' => \@top_locked_info, }, $lfh) || localdie ("Couldn't save binary data to «$outfile»!\n"); } sub dump_error_as_json { my $json = encode_json({ 'error_info' => \%error_info, }) || localdie ("Encode object to JSON failed!\n"); print $fh $json; } sub dump_as_json { my $json = encode_json({ 'overall_stat' => \%overall_stat, 'pgb_overall_stat' => \%pgb_overall_stat, 'overall_checkpoint' => \%overall_checkpoint, 'normalyzed_info' => \%normalyzed_info, 'error_info' => \%error_info, 'pgb_error_info' => \%pgb_error_info, 'pgb_pool_info' => \%pgb_pool_info, 'connection_info' => \%connection_info, 'pgb_connection_info' => \%pgb_connection_info, 'database_info' => \%database_info, 'application_info' => \%application_info, 'user_info' => \%user_info, 'host_info' => \%host_info, 'checkpoint_info' => \%checkpoint_info, 'session_info' => \%session_info, 'pgb_session_info' => \%pgb_session_info, 'tempfile_info' => \%tempfile_info, 'logs_type' => \%logs_type, 'lock_info' => \%lock_info, 'per_minute_info' => \%per_minute_info, 'pgb_per_minute_info' => \%pgb_per_minute_info, 'top_slowest' => \@top_slowest, 'nlines' => $nlines, 'log_files' => \@log_files, 'autovacuum_info' => \%autovacuum_info, 'autoanalyze_info' => \%autoanalyze_info, 'top_tempfile_info' => \@top_tempfile_info, 'top_locked_info' => \@top_locked_info, }) || localdie ("Encode object to JSON failed!\n"); print $fh $json; } # Highlight SQL code sub highlight_code { my $code = shift; # Escape HTML code into SQL values $code = &escape_html($code); # Do not try to prettify queries longer # than 10KB as this will take too much time return $code if (length($code) > $MAX_QUERY_LENGTH); # prettify SQL query if (!$noprettify) { $sql_prettified->query($code); $code = $sql_prettified->beautify; } return $code if ($nohighlight); my $i = 0; my @qqcode = (); while ($code =~ s/("[^\"]*")/QQCODEY${i}A/s) { push(@qqcode, $1); $i++; } $i = 0; my @qcode = (); while ($code =~ s/('[^\']*')/QCODEY${i}B/s) { push(@qcode, $1); $i++; } foreach my $x (sort keys %SYMBOLS) { $code =~ s/$x/\$\$PGBGYA\$\$$SYMBOLS{$x}\$\$PGBGYB\$\$/gs; } for (my $x = 0 ; $x <= $#KEYWORDS1 ; $x++) { #$code =~ s/\b$KEYWORDS1[$x]\b/$KEYWORDS1[$x]<\/span>/igs; $code =~ s/(?$KEYWORDS1[$x]<\/span>/igs; } for (my $x = 0 ; $x <= $#KEYWORDS2 ; $x++) { $code =~ s/(?$KEYWORDS2[$x]<\/span>/igs; } for (my $x = 0 ; $x <= $#KEYWORDS3 
; $x++) { $code =~ s/\b$KEYWORDS3[$x]\b/$KEYWORDS3[$x]<\/span>/igs; } for (my $x = 0 ; $x <= $#BRACKETS ; $x++) { $code =~ s/($BRACKETS[$x])/$1<\/span>/igs; } $code =~ s/\$\$PGBGYA\$\$([^\$]+)\$\$PGBGYB\$\$/$1<\/span>/gs; $code =~ s/\b(\d+)\b/$1<\/span>/igs; for (my $x = 0; $x <= $#qcode; $x++) { $code =~ s/QCODEY${x}B/$qcode[$x]/s; } for (my $x = 0; $x <= $#qqcode; $x++) { $code =~ s/QQCODEY${x}A/$qqcode[$x]/s; } $code =~ s/('[^']*')/$1<\/span>/gs; $code =~ s/(`[^`]*`)/$1<\/span>/gs; $code =~ s/\$\$PGBGY(A|B)\$\$//gs; return $code; } sub compute_arg_list { # Some command line arguments can be used multiple times or written # as a comma-separated list. # For example: --dbuser=postgres --dbuser=joe or --dbuser=postgres,joe # So we have to aggregate all the possible values my @tmp = (); foreach my $v (@exclude_user) { push(@tmp, split(/,/, $v)); } @exclude_user = (); push(@exclude_user, @tmp); @tmp = (); foreach my $v (@dbname) { push(@tmp, split(/,/, $v)); } @dbname = (); push(@dbname, @tmp); @tmp = (); foreach my $v (@dbuser) { push(@tmp, split(/,/, $v)); } @dbuser = (); push(@dbuser, @tmp); @tmp = (); foreach my $v (@dbclient) { push(@tmp, split(/,/, $v)); } @dbclient = (); push(@dbclient, @tmp); @tmp = (); foreach my $v (@dbappname) { push(@tmp, split(/,/, $v)); } @dbappname = (); push(@dbappname, @tmp); @tmp = (); foreach my $v (@exclude_appname) { push(@tmp, split(/,/, $v)); } @exclude_appname = (); push(@exclude_appname, @tmp); @tmp = (); foreach my $v (@exclude_line) { push(@tmp, split(/,/, $v)); } @exclude_line = (); push(@exclude_line, @tmp); } sub validate_log_line { my ($t_pid) = @_; # Set details about connection if prefix doesn't included them # and log_connection is enabled if ($prefix_vars{'t_loglevel'} eq 'LOG') { if ( !$prefix_vars{'t_client'} && ($prefix_vars{'t_query'} =~ /connection received: host=([^\s]+)(?: port=(\d+))?/) ) { $current_sessions{$prefix_vars{'t_pid'}}{host} = $1; return 1; } elsif ( !$prefix_vars{'t_dbname'} && ($prefix_vars{'t_query'} =~ /connection authorized: user=([^\s]+)(?: database=([^\s]+))?/) ) { $current_sessions{$prefix_vars{'t_pid'}}{user} = $1; $current_sessions{$prefix_vars{'t_pid'}}{database} = $2; } } # Set details from previous connection line $prefix_vars{'t_dbname'} = $current_sessions{$t_pid}{database} if (!$prefix_vars{'t_dbname'} && exists $current_sessions{$t_pid}{database}); $prefix_vars{'t_dbuser'} = $current_sessions{$t_pid}{user} if (!$prefix_vars{'t_user'} && exists $current_sessions{$t_pid}{user}); $prefix_vars{'t_client'} = $current_sessions{$t_pid}{host} if (!$prefix_vars{'t_client'} && exists $current_sessions{$t_pid}{host}); # Clear storage, the authorized session will be reparsed after delete $current_sessions{$prefix_vars{'t_pid'}}; # Look at particular cases of vacuum/analyze that have the database # name inside the log message so that they could be associated if ($prefix_vars{'t_query'} =~ / of table "([^\.]+)\.[^\.]+\.[^\.]+":/) { $prefix_vars{'t_dbname'} = $1; } # Check user and/or database if required if ($#dbname >= 0) { # Log line does not match the required dbname if (!$prefix_vars{'t_dbname'} || !grep(/^$prefix_vars{'t_dbname'}$/i, @dbname)) { delete $current_sessions{$prefix_vars{'t_pid'}}; return 0; } } if ($#dbuser >= 0) { # Log line does not match the required dbuser if (!$prefix_vars{'t_dbuser'} || !grep(/^$prefix_vars{'t_dbuser'}$/i, @dbuser)) { delete $current_sessions{$prefix_vars{'t_pid'}}; return 0; } } if ($#dbclient >= 0) { # Log line does not match the required dbclient if (!$prefix_vars{'t_client'} 
|| !grep(/^$prefix_vars{'t_client'}$/i, @dbclient)) { delete $current_sessions{$prefix_vars{'t_pid'}}; return 0; } } if ($#dbappname >= 0) { # Log line does not match the required dbname if (!$prefix_vars{'t_appname'} || !grep(/^\Q$prefix_vars{'t_appname'}\E$/i, @dbappname)) { delete $current_sessions{$prefix_vars{'t_pid'}}; return 0; } } if ($#exclude_user >= 0) { # Log line matches the excluded dbuser if ($prefix_vars{'t_dbuser'} && grep(/^$prefix_vars{'t_dbuser'}$/i, @exclude_user)) { delete $current_sessions{$prefix_vars{'t_pid'}}; return 0; } } if ($#exclude_appname >= 0) { # Log line matches the excluded appname if ($prefix_vars{'t_appname'} && grep(/^\Q$prefix_vars{'t_appname'}\E$/i, @exclude_appname)) { delete $current_sessions{$prefix_vars{'t_pid'}}; return 0; } } return 1; } sub parse_log_prefix { my ($t_logprefix) = @_; # Extract user and database information from the logprefix part if ($t_logprefix) { # Search for database user if ($t_logprefix =~ $regex_prefix_dbuser) { $prefix_vars{'t_dbuser'} = $1; } # Search for database name if ($t_logprefix =~ $regex_prefix_dbname) { $prefix_vars{'t_dbname'} = $1; } # Search for client host name if ($t_logprefix =~ $regex_prefix_dbclient) { $prefix_vars{'t_dbclient'} = $1; $prefix_vars{'t_dbclient'} = _gethostbyaddr($prefix_vars{'t_dbclient'}) if ($dns_resolv); } # Search for application name if ($t_logprefix =~ $regex_prefix_dbappname) { $prefix_vars{'t_appname'} = $1; } } } sub parse_query { my $fmt = shift; my $t_pid = $prefix_vars{'t_pid'}; my $date_part = "$prefix_vars{'t_year'}$prefix_vars{'t_month'}$prefix_vars{'t_day'}"; my $cur_last_log_timestamp = "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} " . "$prefix_vars{t_hour}:$prefix_vars{t_min}:$prefix_vars{t_sec}"; # Force some LOG messages to be ERROR messages so that they will appear # in the event/error/warning messages report. 
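	# (illustrative note) For a prefix parsed as 2017-07-27 14:05:33, the keys built
	# above would be $date_part = "20170727" and $cur_last_log_timestamp =
	# "2017-07-27 14:05:33"; they index the per-day/per-hour and peak statistics.
	# The reclassification below relies on change_log_level() to decide whether a
	# plain LOG line should be counted among the error-like events.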
if ($prefix_vars{'t_loglevel'} eq 'LOG') { $prefix_vars{'t_loglevel'} = 'ERROR' if (&change_log_level($prefix_vars{'t_query'})); } elsif (($prefix_vars{'t_loglevel'} eq 'ERROR') && !$error_only) { if ($prefix_vars{'t_query'} =~ /canceling statement due to statement timeout/) { # Stores cancelled queries return if ($disable_query); $cancelled_info{count}++; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{cancelled}{count}++; # Store current cancelled query information that will be used later # when we will parse the query that has been killed $overall_stat{'peak'}{$cur_last_log_timestamp}{cancelled_count}++; $cur_cancel_info{$t_pid}{count}++; if ($fmt eq 'csv') { $cur_cancel_info{$t_pid}{query} = $prefix_vars{'t_statement'}; } $cur_cancel_info{$t_pid}{timestamp} = $prefix_vars{'t_timestamp'}; $cur_cancel_info{$t_pid}{dbname} = $prefix_vars{'t_dbname'}; $cur_cancel_info{$t_pid}{dbuser} = $prefix_vars{'t_dbuser'}; $cur_cancel_info{$t_pid}{dbclient} = $prefix_vars{'t_client'} || $prefix_vars{'t_dbclient'}; $cur_cancel_info{$t_pid}{dbappname} = $prefix_vars{'t_appname'}; } elsif (exists $cur_cancel_info{$t_pid}) { &store_temporary_and_lock_infos($t_pid); } } # Remove session failure from current workload because there is no disconnection entry if (!$disable_session && ($prefix_vars{'t_loglevel'} eq 'FATAL')) { delete $current_sessions{$prefix_vars{'t_pid'}}; if ($extension eq 'tsung') { delete $tsung_session{$prefix_vars{'t_pid'}} } } elsif (!$disable_session && ($prefix_vars{'t_loglevel'} eq 'WARNING')) { if ($prefix_vars{'t_query'} =~ /terminating connection/) { delete $current_sessions{$prefix_vars{'t_pid'}}; if ($extension eq 'tsung') { delete $tsung_session{$prefix_vars{'t_pid'}} } } } # Do not process DEALLOCATE lines if ($prefix_vars{'t_query'} =~ /statement: DEALLOCATE/) { return; } # Store a counter of logs type $logs_type{$prefix_vars{'t_loglevel'}}++; # Do not parse lines that are not an error message when error only report is requested if ($error_only && ($prefix_vars{'t_loglevel'} !~ $full_error_regex)) { return; } # Do not parse lines that are an error-like message when error reports are not wanted if ($disable_error && ($prefix_vars{'t_loglevel'} =~ $full_error_regex)) { return; } # Replace syslog tabulation rewrite if ($fmt =~ /syslog/) { $prefix_vars{'t_query'} =~ s/#011/\t/g; } # Save previous temporary file information with same pid to not overwrite it if ($prefix_vars{'t_loglevel'} =~ $main_log_regex) { if (($prefix_vars{'t_query'} !~ /temporary file: path .*, size \d+/) && exists $cur_temp_info{$t_pid}) { &store_temporary_and_lock_infos($t_pid); } if (($prefix_vars{'t_query'} !~ /acquired [^\s]+ on [^\s]+ .* after [0-9\.]+ ms/) && exists $cur_lock_info{$t_pid}) { &store_temporary_and_lock_infos($t_pid); } } # Stores the error's detail if previous line was an error if (($prefix_vars{'t_loglevel'} =~ /(DETAIL|STATEMENT|CONTEXT|HINT)/) && ($cur_info{$t_pid}{loglevel} =~ $main_error_regex)) { if (($prefix_vars{'t_loglevel'} ne 'DETAIL') || ($prefix_vars{'t_query'} !~ /parameters: (.*)/)) { # Store error details only if this is not a parameter list (never present with errors) $cur_info{$t_pid}{"\L$prefix_vars{'t_loglevel'}\E"} .= $prefix_vars{'t_query'}; } if (($prefix_vars{'t_loglevel'} eq 'STATEMENT') && exists $cur_temp_info{$t_pid} && ($cur_temp_info{$t_pid}{query} eq '')) { $cur_temp_info{$t_pid}{query} = $prefix_vars{'t_query'}; } return; } # Special cases when a STATEMENT is parsed if ($prefix_vars{'t_loglevel'} eq 'STATEMENT') { # Stores 
temporary file statement if a temporary record with the same pid exists if ( exists $cur_temp_info{$t_pid}{size}) { # Store query of the last temporary file found. $cur_temp_info{$t_pid}{query} = $prefix_vars{'t_query'}; return; } # Stores query related to last lock information if (exists $cur_lock_info{$t_pid}{wait}) { $cur_lock_info{$t_pid}{query} = $prefix_vars{'t_query'}; $cur_lock_info{$t_pid}{timestamp} = $prefix_vars{'t_timestamp'}; $cur_lock_info{$t_pid}{dbname} = $prefix_vars{'t_dbname'}; $cur_lock_info{$t_pid}{dbuser} = $prefix_vars{'t_dbuser'}; $cur_lock_info{$t_pid}{dbclient} = $prefix_vars{'t_client'} || $prefix_vars{'t_dbclient'}; $cur_lock_info{$t_pid}{dbappname} = $prefix_vars{'t_appname'}; return; } # Stores query related to cancelled queries information if (exists $cur_cancel_info{$t_pid}{count}) { $cur_cancel_info{$t_pid}{query} = $prefix_vars{'t_query'}; $cur_cancel_info{$t_pid}{timestamp} = $prefix_vars{'t_timestamp'}; $cur_cancel_info{$t_pid}{dbname} = $prefix_vars{'t_dbname'}; $cur_cancel_info{$t_pid}{dbuser} = $prefix_vars{'t_dbuser'}; $cur_cancel_info{$t_pid}{dbclient} = $prefix_vars{'t_client'} || $prefix_vars{'t_dbclient'}; $cur_cancel_info{$t_pid}{dbappname} = $prefix_vars{'t_appname'}; return; } } # set current session workload if ( !$disable_session ) { my $sess_count = scalar keys %current_sessions; $overall_stat{'peak'}{$cur_last_log_timestamp}{session} = $sess_count; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{"$prefix_vars{'t_min'}"}{session}{count} = $sess_count; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{"$prefix_vars{'t_min'}"}{session}{second}{$prefix_vars{'t_sec'}} = $sess_count; } # Stores lock activity if (($prefix_vars{'t_loglevel'} eq 'LOG') && ($prefix_vars{'t_query'} =~ /acquired ([^\s]+) on ([^\s]+) .* after ([0-9\.]+) ms/)) { return if ($disable_lock); $lock_info{$1}{count}++; $lock_info{$1}{duration} += $3; $lock_info{$1}{$2}{count}++; $lock_info{$1}{$2}{duration} += $3; $lock_info{$1}{chronos}{$date_part}{$prefix_vars{'t_hour'}}{count}++; $lock_info{$1}{chronos}{$date_part}{$prefix_vars{'t_hour'}}{duration}++; # Store current lock information that will be used later # when we will parse the query responsible for the locks $cur_lock_info{$t_pid}{wait} = $3; if ($fmt eq 'csv') { $cur_lock_info{$t_pid}{query} = $prefix_vars{'t_statement'}; $cur_lock_info{$t_pid}{timestamp} = $prefix_vars{'t_timestamp'}; $cur_lock_info{$t_pid}{dbname} = $prefix_vars{'t_dbname'}; $cur_lock_info{$t_pid}{dbuser} = $prefix_vars{'t_dbuser'}; $cur_lock_info{$t_pid}{dbclient} = $prefix_vars{'t_client'} || $prefix_vars{'t_dbclient'}; $cur_lock_info{$t_pid}{dbappname} = $prefix_vars{'t_appname'}; } return; } # Stores temporary files activity if (($prefix_vars{'t_loglevel'} eq 'LOG') && ($prefix_vars{'t_query'} =~ /temporary file: path (.*), size (\d+)/)) { return if ($disable_temporary); my $filepath = $1; my $size = $2; $tempfile_info{count}++; $tempfile_info{size} += $size; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{tempfile}{count}++; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{tempfile}{size} += $size; # Store current temporary file information that will be used later # when we will parse the query responsible for the tempfile $cur_temp_info{$t_pid}{size} += $size; $cur_temp_info{$t_pid}{query} = ''; # initialyze the query $tempfile_info{maxsize} = $cur_temp_info{$t_pid}{size} if ($tempfile_info{maxsize} < $cur_temp_info{$t_pid}{size}); 
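	# Illustrative example of a matching entry (path and size are made up):
	#   LOG:  temporary file: path "base/pgsql_tmp/pgsql_tmp12345.0", size 2048000
	# Sizes are summed per backend pid in %cur_temp_info; the related query is
	# attached from a following STATEMENT line (or from t_statement in CSV format)
	# and store_temporary_and_lock_infos() later folds the pair into the
	# per-normalized-query temporary file statistics.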
$overall_stat{'peak'}{$cur_last_log_timestamp}{tempfile_size} += $size; $overall_stat{'peak'}{$cur_last_log_timestamp}{tempfile_count}++; if ($fmt eq 'csv') { $cur_temp_info{$t_pid}{query} = $prefix_vars{'t_statement'}; } # Stores information related to first created temporary file if (!exists $cur_temp_info{$t_pid}{timestamp}) { $cur_temp_info{$t_pid}{timestamp} = $prefix_vars{'t_timestamp'} || ''; $cur_temp_info{$t_pid}{dbname} = $prefix_vars{'t_dbname'} || ''; $cur_temp_info{$t_pid}{dbuser} = $prefix_vars{'t_dbuser'} || ''; $cur_temp_info{$t_pid}{dbclient} = $prefix_vars{'t_client'} || $prefix_vars{'t_dbclient'}; $cur_temp_info{$t_pid}{dbappname} = $prefix_vars{'t_appname'} || ''; } return; } # Stores pre-connection activity if (($prefix_vars{'t_loglevel'} eq 'LOG') && ($prefix_vars{'t_query'} =~ /connection received: host=([^\s]+)(?: port=(\d+))?/)) { $current_sessions{$prefix_vars{'t_pid'}}{host} = $1; return if ($disable_connection); $conn_received{$t_pid} = $1; $conn_received{$t_pid} = _gethostbyaddr($conn_received{$t_pid}) if ($dns_resolv); return; } # Stores connection activity if ( ($prefix_vars{'t_loglevel'} eq 'LOG') && ($prefix_vars{'t_query'} =~ /connection authorized: user=([^\s]+)(?: database=([^\s]+))?/)) { $current_sessions{$prefix_vars{'t_pid'}}{user} = $1; $current_sessions{$prefix_vars{'t_pid'}}{database} = $2; return if ($disable_connection); my $usr = $1; my $db = 'unknown'; my $host = ''; if ($prefix_vars{'t_query'} =~ / database=([^\s]+)/) { $db = $1; } elsif ($prefix_vars{'t_dbname'}) { $db = $prefix_vars{'t_dbname'}; } if ($prefix_vars{'t_query'} =~ / host=([^\s]+)/) { $host = $1; $host = _gethostbyaddr($host) if ($dns_resolv); } elsif ($prefix_vars{'t_client'} || $prefix_vars{'t_dbclient'}) { $host = $prefix_vars{'t_client'} || $prefix_vars{'t_dbclient'}; $host = _gethostbyaddr($host) if ($dns_resolv); } if ($extension eq 'tsung') { $tsung_session{$prefix_vars{'t_pid'}}{connection}{database} = $db; $tsung_session{$prefix_vars{'t_pid'}}{connection}{user} = $usr; $tsung_session{$prefix_vars{'t_pid'}}{connection}{date} = $prefix_vars{'t_date'}; return; } $overall_stat{'peak'}{$cur_last_log_timestamp}{connection}++; $connection_info{count}++; $connection_info{user}{$usr}++; $connection_info{database}{$db}++; $connection_info{database_user}{$db}{$usr}++; $connection_info{chronos}{$date_part}{$prefix_vars{'t_hour'}}{count}++; ############################################################################### # May be used in the future to display more detailed information on connection # $connection_info{chronos}{$date_part}{$prefix_vars{'t_hour'}}{user}{$usr}++; # $connection_info{chronos}{$date_part}{$prefix_vars{'t_hour'}}{database}{$db}++; # $connection_info{chronos}{$date_part}{$prefix_vars{'t_hour'}}{database_user}{$db}{$usr}++; ############################################################################### if ($graph) { $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{"$prefix_vars{'t_min'}"}{connection}{count}++; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{"$prefix_vars{'t_min'}"}{connection}{second}{$prefix_vars{'t_sec'}}++; } if (exists $conn_received{$t_pid}) { $connection_info{host}{$conn_received{$t_pid}}++; delete $conn_received{$t_pid}; } elsif ($host) { $connection_info{host}{$host}++; } return; } # Store session duration if (($prefix_vars{'t_loglevel'} eq 'LOG') && ($prefix_vars{'t_query'} =~ /disconnection: session time: ([^\s]+) user=([^\s]+) database=([^\s]+) host=([^\s]+)/)) { return if ($disable_session); delete 
$current_sessions{$prefix_vars{'t_pid'}}; if ($extension eq 'tsung') { $tsung_session{$prefix_vars{'t_pid'}}{disconnection}{date} = $prefix_vars{'t_timestamp'}; } my $time = $1; my $usr = $2; my $db = $3; my $host = $4; $host = _gethostbyaddr($host) if ($dns_resolv); if ($extension eq 'tsung') { &store_tsung_session($prefix_vars{'t_pid'}); return; } # Store time in milliseconds $time =~ /(\d+):(\d+):(\d+\.\d+)/; $time = ($3 * 1000) + ($2 * 60 * 1000) + ($1 * 60 * 60 * 1000); $session_info{count}++; $session_info{duration} += $time; $session_info{chronos}{$date_part}{$prefix_vars{'t_hour'}}{count}++; $session_info{chronos}{$date_part}{$prefix_vars{'t_hour'}}{duration} += $time; $session_info{database}{$db}{count}++; $session_info{database}{$db}{duration} += $time; $session_info{user}{$usr}{count}++; $session_info{user}{$usr}{duration} += $time; $session_info{host}{$host}{count}++; $session_info{host}{$host}{duration} += $time; my $app = 'unknown'; $app = $prefix_vars{'t_appname'} if (exists $prefix_vars{'t_appname'}); $session_info{app}{$app}{count}++; $session_info{app}{$app}{duration} += $time; my $k = &get_hist_inbound($time, @histogram_session_time); $overall_stat{histogram}{session_time}{$k}++; $overall_stat{histogram}{session_total}++; return; } # Store autovacuum information if ( ($prefix_vars{'t_loglevel'} eq 'LOG') && ($prefix_vars{'t_query'} =~ /automatic vacuum of table "([^\s]+)": index scans: (\d+)/ ) ) { return if ($disable_autovacuum); $autovacuum_info{count}++; $autovacuum_info{tables}{$1}{vacuums} += 1; $autovacuum_info{tables}{$1}{idxscans} += $2; $autovacuum_info{chronos}{$date_part}{$prefix_vars{'t_hour'}}{count}++; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{autovacuum}{count}++; $cur_info{$t_pid}{vacuum} = $1; if ($fmt eq 'csv') { if ($prefix_vars{'t_query'} =~ /pages: (\d+) removed, (\d+) remain/) { $autovacuum_info{tables}{$cur_info{$t_pid}{vacuum}}{pages}{removed} += $1; } if ($prefix_vars{'t_query'} =~ /tuples: (\d+) removed, (\d+) remain/) { $autovacuum_info{tables}{$cur_info{$t_pid}{vacuum}}{tuples}{removed} += $1; } if ($prefix_vars{'t_query'} =~ m#system usage: CPU .* sec elapsed (.*) sec#) { if ($1 > $autovacuum_info{peak}{system_usage}{elapsed}) { $autovacuum_info{peak}{system_usage}{elapsed} = $1; $autovacuum_info{peak}{system_usage}{table} = $cur_info{$t_pid}{vacuum}; $autovacuum_info{peak}{system_usage}{date} = "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} " . 
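	# (illustrative) A line such as "system usage: CPU 0.01s/0.02u sec elapsed 1.56 sec"
	# would record 1.56 as the peak elapsed time together with the table name and the
	# timestamp assembled here; this detail is only extracted for CSV logs, presumably
	# because the whole autovacuum report arrives there as a single record.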
"$prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; } } } $cur_info{$t_pid}{year} = $prefix_vars{'t_year'}; $cur_info{$t_pid}{month} = $prefix_vars{'t_month'}; $cur_info{$t_pid}{day} = $prefix_vars{'t_day'}; $cur_info{$t_pid}{hour} = $prefix_vars{'t_hour'}; $cur_info{$t_pid}{min} = $prefix_vars{'t_min'}; $cur_info{$t_pid}{sec} = $prefix_vars{'t_sec'}; return; } # Store autoanalyze information if ( ($prefix_vars{'t_loglevel'} eq 'LOG') && ($prefix_vars{'t_query'} =~ /automatic analyze of table "([^\s]+)"/ ) ) { return if ($disable_autovacuum); my $table = $1; $autoanalyze_info{count}++; $autoanalyze_info{tables}{$table}{analyzes} += 1; $autoanalyze_info{chronos}{$date_part}{$prefix_vars{'t_hour'}}{count}++; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{autoanalyze}{count}++; if ($prefix_vars{'t_query'} =~ m#system usage: CPU .* sec elapsed (.*) sec#) { if ($1 > $autoanalyze_info{peak}{system_usage}{elapsed}) { $autoanalyze_info{peak}{system_usage}{elapsed} = $1; $autoanalyze_info{peak}{system_usage}{table} = $table; $autoanalyze_info{peak}{system_usage}{date} = $cur_last_log_timestamp; } } } # Store checkpoint or restartpoint information if ( ($prefix_vars{'t_loglevel'} eq 'LOG') && ($prefix_vars{'t_query'} =~ /point complete: wrote (\d+) buffers \(([^\)]+)\); (\d+) transaction log file\(s\) added, (\d+) removed, (\d+) recycled; write=([0-9\.]+) s, sync=([0-9\.]+) s, total=([0-9\.]+) s/ ) ) { # Example: LOG: checkpoint complete: wrote 8279 buffers (50.5%); 0 transaction log file(s) added, 0 removed, 0 recycled; write=2.277 s, sync=0.194 s, total=2.532 s; sync files=13, longest=0.175 s, average=0.014 s; distance=402024 kB, estimate=402024 kB return if ($disable_checkpoint); $checkpoint_info{wbuffer} += $1; #$checkpoint_info{percent_wbuffer} += $2; $checkpoint_info{file_added} += $3; $checkpoint_info{file_removed} += $4; $checkpoint_info{file_recycled} += $5; $overall_checkpoint{'peak'}{$cur_last_log_timestamp}{walfile_usage} += ($3 + $5); $checkpoint_info{write} += $6; $checkpoint_info{sync} += $7; $checkpoint_info{total} += $8; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{wbuffer} += $1; #$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{percent_wbuffer} += $2; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{file_added} += $3; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{file_removed} += $4; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{file_recycled} += $5; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{write} += $6; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{sync} += $7; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{total} += $8; $overall_checkpoint{'peak'}{$cur_last_log_timestamp}{checkpoint_wbuffer} += $1; if ($6 > $overall_checkpoint{checkpoint_write}) { $overall_checkpoint{checkpoint_write} = $6; } if ($7 > $overall_checkpoint{checkpoint_sync}) { $overall_checkpoint{checkpoint_sync} = $7; } if ($prefix_vars{'t_query'} =~ /sync files=(\d+), longest=([0-9\.]+) s, average=([0-9\.]+) s/) { $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{sync_files} += $1; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{sync_longest} = $2 if ($2 > 
$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{sync_longest}); $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{sync_avg} += $3; } # 9.6: LOG: checkpoint complete: wrote 0 buffers (0.0%); 0 transaction log file(s) added, 1 removed, 0 recycled; write=0.016 s, sync=0.000 s, total=0.054 s; sync files=0, longest=0.000 s, average=0.000 s; distance=0 kB, estimate=14744 kB if ($prefix_vars{'t_query'} =~ /; distance=(\d+) kB, estimate=(\d+) kB/) { $checkpoint_info{distance} += $1; $checkpoint_info{estimate} += $2; if ($1 > $overall_checkpoint{distance}) { $overall_checkpoint{distance} = $1; } if ($2 > $overall_checkpoint{estimate}) { $overall_checkpoint{estimate} = $2; } $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{distance} += $1; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{distance_count}++; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{estimate} += $2; $overall_checkpoint{'peak'}{$cur_last_log_timestamp}{distance} += $1; $overall_checkpoint{'peak'}{$cur_last_log_timestamp}{estimate} += $2; } return; } # Store checkpoint warning information if ( ($prefix_vars{'t_loglevel'} eq 'LOG') && ($prefix_vars{'t_query'} =~ /checkpoints are occurring too frequently \((\d+) seconds apart\)/)) { return if ($disable_checkpoint); $checkpoint_info{warning}++; $checkpoint_info{warning_seconds} += $1; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{warning}++; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{warning_seconds} += $1; $overall_checkpoint{checkpoint_warning}++; return; } # Store old restartpoint information if ( ($prefix_vars{'t_loglevel'} eq 'LOG') && ($prefix_vars{'t_query'} =~ /restartpoint complete: wrote (\d+) buffers \(([^\)]+)\); write=([0-9\.]+) s, sync=([0-9\.]+) s, total=([0-9\.]+) s/ ) ) { # Example: LOG: restartpoint complete: wrote 1568 buffers (0.3%); write=146.237 s, sync=0.251 s, total=146.489 s return if ($disable_checkpoint); $checkpoint_info{wbuffer} += $1; #$checkpoint_info{percent_wbuffer} += $2; $checkpoint_info{write} += $6; $checkpoint_info{sync} += $7; $checkpoint_info{total} += $8; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{wbuffer} += $1; #$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{percent_wbuffer} += $2; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{write} += $6; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{sync} += $7; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{total} += $8; $overall_checkpoint{'peak'}{$cur_last_log_timestamp}{checkpoint_wbuffer} += $1; if ($6 > $overall_checkpoint{checkpoint_write}) { $overall_checkpoint{checkpoint_write} = $6; } if ($7 > $overall_checkpoint{checkpoint_sync}) { $overall_checkpoint{checkpoint_sync} = $7; } if ($prefix_vars{'t_query'} =~ /distance=(\d+) kB, estimate=(\d+) kB/) { $checkpoint_info{distance} += $1; $checkpoint_info{estimate} += $2; $overall_checkpoint{'peak'}{$cur_last_log_timestamp}{distance} += $1; $overall_checkpoint{'peak'}{$cur_last_log_timestamp}{estimate} += $2; if ($1 > $overall_checkpoint{distance}) { $overall_checkpoint{distance} = $1; } if ($2 > $overall_checkpoint{estimate}) { $overall_checkpoint{estimate} = $2; } 
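	# (illustrative) For the 9.6 sample quoted in the checkpoint branch above,
	# "distance=0 kB, estimate=14744 kB" gives $1 = 0 and $2 = 14744: both are
	# accumulated globally and per minute, and the overall maxima are kept in
	# %overall_checkpoint. The distance_count bumped just below is presumably the
	# divisor used later to average the per-minute distance.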
$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{distance} += $1; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{distance_count}++; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{estimate} += $2; } return; } # Look at bind/execute parameters if any if ($cur_info{$t_pid}{query}) { # Remove obsolete connection storage delete $conn_received{$cur_info{$t_pid}{pid}}; # The query is complete but we are missing some debug/info/bind parameter logs if ($cur_info{$t_pid}{loglevel} eq 'LOG') { # Apply bind parameters if any if (($prefix_vars{'t_loglevel'} eq 'DETAIL') && ($prefix_vars{'t_query'} =~ /parameters: (.*)/)) { $cur_info{$t_pid}{parameters} = "$1"; # go look at other params return; # replace the execute statements with the prepared query and set the parameters } elsif (($prefix_vars{'t_loglevel'} eq 'DETAIL') && ($prefix_vars{'t_query'} =~ s/prepare: PREPARE\s+([^\s]+)\s+AS\s+(.*)//is)) { my $q_name = $1; my $real_query = $2; if ($cur_info{$t_pid}{query} =~ /\b$q_name\b/) { $cur_info{$t_pid}{query} =~ s/EXECUTE\s+$q_name(\s+)\(//is; $cur_info{$t_pid}{parameters} = $cur_info{$t_pid}{query}; $cur_info{$t_pid}{parameters} =~ s/\)$//; $cur_info{$t_pid}{query} = $real_query; $cur_info{$t_pid}{'bind'} = 1; } # go look at other params return; } } } # Apply bind parameters if any if ($prefix_vars{'t_detail'} =~ /parameters: (.*)/) { $cur_info{$t_pid}{parameters} = "$1"; # go look at other params } #### # Store current query information #### # Log lines with duration only, generated by log_duration = on in postgresql.conf if ($prefix_vars{'t_query'} =~ s/duration: ([0-9\.]+) ms$//s) { $prefix_vars{'t_duration'} = $1; $prefix_vars{'t_query'} = ''; my $k = &get_hist_inbound($1, @histogram_query_time); $overall_stat{histogram}{query_time}{$k}++; $overall_stat{histogram}{query_total}++; &set_current_infos($t_pid); return; } # Store info as tsung session following the output file extension if (($extension eq 'tsung') && !exists $tsung_session{$prefix_vars{'t_pid'}}{connection} && $prefix_vars{'t_dbname'}) { $tsung_session{$prefix_vars{'t_pid'}}{connection}{database} = $prefix_vars{'t_dbname'}; $tsung_session{$prefix_vars{'t_pid'}}{connection}{user} = $prefix_vars{'t_dbuser'}; $tsung_session{$prefix_vars{'t_pid'}}{connection}{date} = $prefix_vars{'t_date'}; } my $t_action = ''; # Store query duration generated by log_min_duration >= 0 in postgresql.conf if ($prefix_vars{'t_query'} =~ s/duration: ([0-9\.]+) ms\s+(query|statement): //is) { $prefix_vars{'t_duration'} = $1; $t_action = $2; my $k = &get_hist_inbound($1, @histogram_query_time); $overall_stat{histogram}{query_time}{$k}++; $overall_stat{histogram}{query_total}++; if (($t_action eq 'statement') && $prefix_vars{'t_query'} =~ /^(PREPARE|EXECUTE)\b/i) { $overall_stat{lc($1)}++; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{lc($1)}++; # We do not store prepare statement return if (lc($1) eq 'prepare'); } # Log line with duration and statement from prepared queries } elsif ($prefix_vars{'t_query'} =~ s/duration: ([0-9\.]+) ms\s+(prepare|parse|bind|execute from fetch|execute)\s+[^:]+:\s//is) { $prefix_vars{'t_duration'} = $1; $t_action = $2; $t_action =~ s/ from fetch//; $t_action = 'prepare' if ($t_action eq 'parse'); $overall_stat{$t_action}++; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{$t_action}++; # Skipping prepare, parse and bind logs return if ($t_action !~ 
/query|statement|execute/); my $k = &get_hist_inbound($prefix_vars{'t_duration'}, @histogram_query_time); $overall_stat{histogram}{query_time}{$k}++; $overall_stat{histogram}{query_total}++; $prefix_vars{'t_bind'} = 1; # Activate storage of the explain plan generated by auto_explain } elsif ($prefix_vars{'t_query'} =~ s/duration: ([0-9\.]+) ms\s+plan://is) { $prefix_vars{'t_duration'} = $1; $cur_plan_info{$prefix_vars{'t_pid'}}{duration} = $prefix_vars{'t_duration'}; my $k = &get_hist_inbound($prefix_vars{'t_duration'}, @histogram_query_time); $overall_stat{histogram}{query_time}{$k}++; $overall_stat{histogram}{query_total}++; # Log line without duration at all } elsif ($prefix_vars{'t_query'} =~ s/(query|statement): //is) { $t_action = $1; if (($t_action eq 'statement') && $prefix_vars{'t_query'} =~ /^(PREPARE|EXECUTE)\b/i) { $overall_stat{lc($1)}++; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{lc($1)}++; } # Log line without duration at all from prepared queries } elsif ($prefix_vars{'t_query'} =~ s/(prepare|parse|bind|execute from fetch|execute)\s+[^:]+:\s//is) { $t_action = $1; $t_action =~ s/ from fetch//; $t_action = 'prepare' if ($t_action eq 'parse'); $overall_stat{$t_action}++; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{$t_action}++; # Skipping parse and bind logs return if ($t_action !~ /query|statement|execute/); $prefix_vars{'t_bind'} = 1; # Log line that could not be parsed } elsif ($prefix_vars{'t_loglevel'} eq 'LOG') { if ($prefix_vars{'t_query'} !~ /incomplete startup packet|connection|receive|unexpected EOF|checkpoint starting:|could not send data to client|parameter .*configuration file|autovacuum launcher|automatic (analyze|vacuum)|detected deadlock while waiting for/ ) { &logmsg('DEBUG', "Unrecognized line: $prefix_vars{'t_loglevel'}: $prefix_vars{'t_query'} at line $nlines"); } return; } if ( ($fmt eq 'csv') && ($prefix_vars{'t_loglevel'} ne 'LOG')) { $cur_info{$t_pid}{detail} = $prefix_vars{'t_detail'}; $cur_info{$t_pid}{hint} = $prefix_vars{'t_hint'}; $cur_info{$t_pid}{context} = $prefix_vars{'t_context'}; $cur_info{$t_pid}{statement} = $prefix_vars{'t_statement'} } &set_current_infos($t_pid); return 1; } sub parse_pgbouncer { my $fmt = shift; my $t_pid = $prefix_vars{'t_pid'}; my $t_session_id = $prefix_vars{'t_session_id'} || $prefix_vars{'t_pid'}; my $date_part = "$prefix_vars{'t_year'}$prefix_vars{'t_month'}$prefix_vars{'t_day'}"; my $cur_last_log_timestamp = "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} " . 
"$prefix_vars{t_hour}:$prefix_vars{t_min}:$prefix_vars{t_sec}"; # Do not parse lines that are not an error message when error only report is requested if ($error_only && ($prefix_vars{'t_loglevel'} !~ $full_error_regex)) { return; } # Do not parse lines that are an error-like message when error reports are not wanted if ($disable_error && ($prefix_vars{'t_loglevel'} =~ $full_error_regex)) { return; } if ($prefix_vars{'t_loglevel'} eq 'STATS') { $pgb_per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{t_req} = $prefix_vars{'t_req/s'}; $pgb_overall_stat{'peak'}{$cur_last_log_timestamp}{t_req} = $prefix_vars{'t_req/s'}; $pgb_per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{t_inbytes} = $prefix_vars{'t_inbytes/s'}; $pgb_overall_stat{'peak'}{$cur_last_log_timestamp}{t_inbytes} = $prefix_vars{'t_inbytes/s'}; $pgb_per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{t_outbytes} = $prefix_vars{'t_outbytes/s'}; $pgb_overall_stat{'peak'}{$cur_last_log_timestamp}{t_outbytes} = $prefix_vars{'t_outbytes/s'}; # We need millisecond instead of microsecond from pgbouncer my $duration = int($prefix_vars{'t_avgduration'}/1000); $pgb_per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{t_avgduration} = $duration; $pgb_overall_stat{'peak'}{$cur_last_log_timestamp}{t_avgduration} = $duration; return; } elsif ($prefix_vars{'t_loglevel'} =~ $main_error_regex) { if (($prefix_vars{'t_loglevel'} eq 'WARNING') || ($prefix_vars{'t_loglevel'} eq 'ERROR')) { if ($prefix_vars{'t_query'} =~ /dropping database '([^']+)' as it does not exist anymore/) { $prefix_vars{t_dbname} = $1; } elsif ($prefix_vars{'t_query'} =~ /^([^:]+): (.*?)\/(.*?)\@([^:]+):\d+ (.*)/) { $prefix_vars{t_query} = $5; $prefix_vars{t_dbname} = $2; $prefix_vars{t_dbuser} = $3; $prefix_vars{t_dbclient} = $4; $prefix_vars{t_session_id} = $1; } else { $prefix_vars{'t_query'} =~ s/^S: //; } # Add log level at beginning of the query and normalize it $prefix_vars{'t_query'} = $prefix_vars{'t_loglevel'} . ": " . 
$prefix_vars{'t_query'}; my $normalized_error = &pgb_normalize_error($prefix_vars{'t_query'}); # Stores total and normalized error count $pgb_overall_stat{'errors_number'}++; $pgb_error_info{$normalized_error}{count}++; # Stores normalized error count per time my $cur_day_str = "$prefix_vars{t_year}$prefix_vars{t_month}$prefix_vars{t_day}"; my $cur_hour_str = "$prefix_vars{t_hour}"; $pgb_error_info{$normalized_error}{chronos}{"$cur_day_str"}{"$cur_hour_str"}{count}++; $pgb_error_info{$normalized_error}{chronos}{"$cur_day_str"}{"$cur_hour_str"}{min}{$prefix_vars{t_min}}++; # Stores normalized query samples if ($sample > 0) { &pgb_set_top_error_sample( $normalized_error, $cur_last_log_timestamp, $prefix_vars{t_query}, $prefix_vars{t_dbname}, $prefix_vars{t_dbuser}, $prefix_vars{t_client} || $prefix_vars{t_dbclient} ); } # Stores taken reserved pool if ($prefix_vars{'t_query'} =~ /Taking connection from reserve_pool/) { my $pool = "$prefix_vars{t_dbuser}\@$prefix_vars{t_dbname}"; $pgb_pool_info{$pool}{count}++; $pgb_pool_info{$pool}{chronos}{"$cur_day_str"}{"$cur_hour_str"}{count}++; $pgb_pool_info{$pool}{chronos}{"$cur_day_str"}{"$cur_hour_str"}{min}{$prefix_vars{t_min}}++; } } else { print STDERR "UNPARSED LOG LEVEL: $prefix_vars{'t_loglevel'} => $prefix_vars{'t_query'}\n"; } } elsif ($prefix_vars{'t_loglevel'} ne 'LOG') { print STDERR "UNRECOGNIZED LOG LEVEL: $prefix_vars{'t_loglevel'} => $prefix_vars{'t_query'}\n"; } # Stores connection activity if ( ($prefix_vars{'t_loglevel'} eq 'LOG') && ($prefix_vars{'t_query'} =~ /login attempt: db=([^\s]+) user=([^\s]+)/)) { return if ($disable_connection); my $usr = $prefix_vars{'t_dbuser'} || $2; my $db = $prefix_vars{'t_dbname'} || $1; my $host= $prefix_vars{'t_client'} || ''; $host = _gethostbyaddr($host) if ($dns_resolv); $pgb_overall_stat{'peak'}{$cur_last_log_timestamp}{connection}++; $pgb_connection_info{count}++; $pgb_connection_info{user}{$usr}++; $pgb_connection_info{host}{$host}++; $pgb_connection_info{database}{$db}++; $pgb_connection_info{database_user}{$db}{$usr}++; $pgb_connection_info{chronos}{$date_part}{$prefix_vars{'t_hour'}}{count}++; if ($graph) { $pgb_per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{"$prefix_vars{'t_min'}"}{connection}{count}++; $pgb_per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{"$prefix_vars{'t_min'}"}{connection}{second}{$prefix_vars{'t_sec'}}++; } # set current session workload if ( !$disable_session ) { $pgb_current_sessions{$t_session_id} = $prefix_vars{'t_timestamp'}; my $sess_count = scalar keys %pgb_current_sessions; $pgb_overall_stat{'peak'}{$cur_last_log_timestamp}{session} = $sess_count; $pgb_per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{"$prefix_vars{'t_min'}"}{session}{count} = $sess_count; $pgb_per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{"$prefix_vars{'t_min'}"}{session}{second}{$prefix_vars{'t_sec'}} = $sess_count; } return; } # Store session duration if (($prefix_vars{'t_loglevel'} eq 'LOG') && ($prefix_vars{'t_query'} =~ /\(age=(\d+)\)$/)) { return if ($disable_session); # Use millisecond for session duration my $time = $1*1000; my $usr = $prefix_vars{'t_dbuser'} || '(nousr)'; my $db = $prefix_vars{'t_dbname'} || '(nodb)'; my $host= $prefix_vars{'t_client'} || ''; $host = _gethostbyaddr($host) if ($dns_resolv && $host); # Store time in milliseconds since the connection attempt if ($pgb_current_sessions{$t_session_id} =~ /(\d+):(\d+):(\d+\.\d+)$/) { my $time1 = ($3 * 1000) + ($2 * 60 * 1000) + ($1 * 60 * 60 * 1000); $prefix_vars{'t_timestamp'} =~ 
/(\d+):(\d+):(\d+\.\d+)$/; my $time2 = ($3 * 1000) + ($2 * 60 * 1000) + ($1 * 60 * 60 * 1000); $time = $time2 - $time1; } $pgb_session_info{count}++; $pgb_session_info{duration} += $time; $pgb_session_info{chronos}{$date_part}{$prefix_vars{'t_hour'}}{count}++; $pgb_session_info{chronos}{$date_part}{$prefix_vars{'t_hour'}}{duration} += $time; $pgb_session_info{database}{$db}{count}++; $pgb_session_info{database}{$db}{duration} += $time; $pgb_session_info{user}{$usr}{count}++; $pgb_session_info{user}{$usr}{duration} += $time; $pgb_session_info{host}{$host}{count}++; $pgb_session_info{host}{$host}{duration} += $time; my $k = &get_hist_inbound($time, @histogram_session_time); $pgb_overall_stat{histogram}{session_time}{$k}++; $pgb_overall_stat{histogram}{session_total}++; delete $pgb_current_sessions{$t_session_id}; return; } return 1; } # Remain current parsed information into memory for subsequent use sub set_current_infos { my $t_pid = shift; $cur_info{$t_pid}{year} = $prefix_vars{'t_year'} if (!$cur_info{$t_pid}{year}); $cur_info{$t_pid}{month} = $prefix_vars{'t_month'} if (!$cur_info{$t_pid}{month}); $cur_info{$t_pid}{day} = $prefix_vars{'t_day'} if (!$cur_info{$t_pid}{day}); $cur_info{$t_pid}{hour} = $prefix_vars{'t_hour'} if (!exists $cur_info{$t_pid}{sec} || ($cur_info{$t_pid}{hour} eq '')); $cur_info{$t_pid}{min} = $prefix_vars{'t_min'} if (!exists $cur_info{$t_pid}{sec} || ($cur_info{$t_pid}{min} eq '')); $cur_info{$t_pid}{sec} = $prefix_vars{'t_sec'} if (!exists $cur_info{$t_pid}{sec} || ($cur_info{$t_pid}{sec} eq '')); $cur_info{$t_pid}{timestamp} = $prefix_vars{'t_timestamp'} if (!$cur_info{$t_pid}{timestamp}); $cur_info{$t_pid}{ident} = $prefix_vars{'t_ident'} if (!$cur_info{$t_pid}{ident}); $cur_info{$t_pid}{query} = $prefix_vars{'t_query'} if (!$cur_info{$t_pid}{query}); $cur_info{$t_pid}{duration} = $prefix_vars{'t_duration'} if (!$cur_info{$t_pid}{duration}); $cur_info{$t_pid}{pid} = $prefix_vars{'t_pid'} if (!$cur_info{$t_pid}{pid}); $cur_info{$t_pid}{session} = $prefix_vars{'t_session_line'} if (!$cur_info{$t_pid}{session}); $cur_info{$t_pid}{loglevel} = $prefix_vars{'t_loglevel'} if (!$cur_info{$t_pid}{loglevel}); $cur_info{$t_pid}{dbname} = $prefix_vars{'t_dbname'} if (!$cur_info{$t_pid}{dbname}); $cur_info{$t_pid}{dbuser} = $prefix_vars{'t_dbuser'} if (!$cur_info{$t_pid}{dbuser}); $cur_info{$t_pid}{dbclient} = $prefix_vars{'t_client'} || $prefix_vars{'t_dbclient'} if (!$cur_info{$t_pid}{dbclient}); $cur_info{$t_pid}{dbappname} = $prefix_vars{'t_appname'} if (!$cur_info{$t_pid}{dbappname}); $cur_info{$t_pid}{date} = $prefix_vars{'t_date'} if (!$cur_info{$t_pid}{date}); $cur_info{$t_pid}{bind} = $prefix_vars{'t_bind'} if (!$cur_info{$t_pid}{bind}); $cur_info{$t_pid}{sqlstate} = $prefix_vars{'t_sqlstate'} if (!$cur_info{$t_pid}{sqlstate}); # Extract the query part from the plan if (exists $cur_plan_info{$t_pid} && exists $cur_plan_info{$t_pid}{plan} && $cur_plan_info{$t_pid}{plan} ne '') { my $key = 'query'; my @plan = split("\n", $cur_plan_info{$t_pid}{plan}); $plan[0] =~ s/^\s*Query Text:\s+//; foreach my $l (@plan) { $key = 'plan' if ($l =~ /\(cost=\d+.*rows=\d+/); $cur_info{$t_pid}{$key} .= "$l\n"; } $cur_info{$t_pid}{query} =~ s/^\s*Query Text:\s+//s; delete $cur_plan_info{$t_pid}; } } sub store_tsung_session { my $pid = shift; return if ($#{$tsung_session{$pid}{dates}} < 0); # Open filehandle my $fh = new IO::File ">>$outfile"; if (not defined $fh) { localdie("FATAL: can't write to $outfile, $!\n"); } if ($pid) { print $fh " \n"; if (exists 
$tsung_session{$pid}{connection}{database}) { print $fh qq{ }; } if ($#{$tsung_session{$pid}{dates}} >= 0) { my $sec = 0; if ($tsung_session{$pid}{connection}{date}) { $sec = $tsung_session{$pid}{dates}[0] - $tsung_session{$pid}{connection}{date}; } print $fh " \n" if ($sec > 0); print $fh " \n"; for (my $i = 0 ; $i <= $#{$tsung_session{$pid}{queries}} ; $i++) { $tsung_queries++; $sec = 0; if ($i > 0) { $sec = $tsung_session{$pid}{dates}[$i] - $tsung_session{$pid}{dates}[$i - 1]; print $fh " \n" if ($sec > 0); } print $fh " \n"; } print $fh " \n"; } if ($#{$tsung_session{$pid}{dates}} >= 0) { my $sec = $tsung_session{$pid}{disconnection}{date} - $tsung_session{$pid}{dates}[-1]; print $fh " \n" if ($sec > 0); } if (exists $tsung_session{$pid}{connection}{database}) { print $fh " \n"; } print $fh " \n\n"; delete $tsung_session{$pid}; } $fh->close; } sub store_queries { my $t_pid = shift; my $end = shift; # With separate log_duration and log_statement wait duration before storing the entry return 0 if (!$end && $log_duration && ($cur_info{$t_pid}{duration} eq '') && ($cur_info{$t_pid}{query} ne '')); # Remove comments if required if ($remove_comment) { $cur_info{$t_pid}{query} =~ s/\/\*(.*?)\*\///gs; } # Anonymize query if requested by the user if ($anonymize && exists $cur_info{$t_pid}{query}) { $cur_info{$t_pid}{query} = &anonymize_query($cur_info{$t_pid}{query}); } return 0 if (!exists $cur_info{$t_pid}); return 1 if (!$cur_info{$t_pid}{year}); # Cleanup and pre-normalize the current query $cur_info{$t_pid}{query} =~ s/^\s+//s; $cur_info{$t_pid}{query} =~ s/[\s;]+$//s; # Just store normalized query when --normalized-only is used if ($dump_normalized_only && $cur_info{$t_pid}{query}) { # Add a semi-colon at end of the query $cur_info{$t_pid}{query} .= ';' if ($cur_info{$t_pid}{query} !~ /;\s*$/s); # Normalize query my $normalized = &normalize_query($cur_info{$t_pid}{query}); # Store normalized query count $normalyzed_info{$normalized}{count}++; return 1; } # Replace bind parameters values in the query if any if (exists $cur_info{$t_pid}{parameters} && ($cur_info{$t_pid}{parameters} =~ /[,\s]*\$(\d+)\s=\s/)) { my @t_res = split(/[,\s]*\$(\d+)\s=\s/, $cur_info{$t_pid}{parameters}); shift(@t_res); for (my $i = 0 ; $i < $#t_res ; $i += 2) { $cur_info{$t_pid}{query} =~ s/\$$t_res[$i]\b/$t_res[$i+1]/s; } } else { # parameters from EXECUTE statements my @t_res = split(/[\s]*,[\s]*/, $cur_info{$t_pid}{parameters}); for (my $i = 0 ; $i <= $#t_res ; $i++) { my $num = $i + 1; $cur_info{$t_pid}{query} =~ s/\$$num\b/$t_res[$i]/s; } } # We only process stored object with query here if ($cur_info{$t_pid}{query}) { # Should we just want select queries if ($select_only) { return 1 if ($cur_info{$t_pid}{query} !~ /^SELECT/is); } # Should we have to exclude some queries if ($#exclude_query >= 0) { foreach (@exclude_query) { if ($cur_info{$t_pid}{query} =~ /$_/i) { $cur_info{$t_pid}{query} = ''; return 1; } } } # Should we have to include only some queries if ($#include_query >= 0) { foreach (@include_query) { if ($cur_info{$t_pid}{query} !~ /$_/i) { $cur_info{$t_pid}{query} = ''; return 1; } } } # Dump queries as tsung request and return if ($extension eq 'tsung') { if ($cur_info{$t_pid}{loglevel} eq 'LOG') { push(@{$tsung_session{$t_pid}{queries}}, $cur_info{$t_pid}{query}); push(@{$tsung_session{$t_pid}{dates}}, $cur_info{$t_pid}{date}); if (!exists $tsung_session{$t_pid}{connection} && $cur_info{$t_pid}{dbname}) { $tsung_session{$t_pid}{connection}{database} = $cur_info{$t_pid}{dbname}; 
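	# (note) In tsung output mode ($extension eq 'tsung') each LOG query is queued
	# per backend pid together with its date; the user and connection date stored on
	# the following lines complete the connection attributes that
	# store_tsung_session() writes out, and only the global query counters just
	# below are still updated before returning.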
$tsung_session{$t_pid}{connection}{user} = $cur_info{$t_pid}{dbuser}; $tsung_session{$t_pid}{connection}{date} = $cur_info{$t_pid}{date}; } } # Stores global statistics $overall_stat{'queries_number'}++; $overall_stat{'queries_duration'} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); return 1; } # Truncate the query if requested by the user $cur_info{$t_pid}{query} = substr($cur_info{$t_pid}{query}, 0, $maxlength) . '[...]' if (($maxlength > 0) && (length($cur_info{$t_pid}{query}) > $maxlength)); } my $cur_day_str = "$cur_info{$t_pid}{year}$cur_info{$t_pid}{month}$cur_info{$t_pid}{day}"; my $cur_hour_str = "$cur_info{$t_pid}{hour}"; # Store the collected information into global statistics if ($cur_info{$t_pid}{loglevel} =~ $main_error_regex) { # Add log level at beginning of the query and normalize it $cur_info{$t_pid}{query} = $cur_info{$t_pid}{loglevel} . ": " . $cur_info{$t_pid}{query}; my $normalized_error = &normalize_error($cur_info{$t_pid}{query}); # Stores total and normalized error count $overall_stat{'errors_number'}++; $error_info{$normalized_error}{count}++; # Stores normalized error count per time $error_info{$normalized_error}{chronos}{"$cur_day_str"}{"$cur_hour_str"}{count}++; $error_info{$normalized_error}{chronos}{"$cur_day_str"}{"$cur_hour_str"}{min}{$cur_info{$t_pid}{min}}++; # Stores log level count per minute $per_minute_info{"$cur_day_str"}{"$cur_hour_str"}{$cur_info{$t_pid}{min}}{log_level}{$cur_info{$t_pid}{loglevel}}++; # Stores normalized query samples if ($sample > 0) { my $cur_last_log_timestamp = "$cur_info{$t_pid}{year}-$cur_info{$t_pid}{month}-$cur_info{$t_pid}{day} " . "$cur_info{$t_pid}{hour}:$cur_info{$t_pid}{min}:$cur_info{$t_pid}{sec}"; &set_top_error_sample( $normalized_error, $cur_last_log_timestamp, $cur_info{$t_pid}{query}, $cur_info{$t_pid}{detail}, $cur_info{$t_pid}{context}, $cur_info{$t_pid}{statement}, $cur_info{$t_pid}{hint}, $cur_info{$t_pid}{dbname}, $cur_info{$t_pid}{dbuser}, $cur_info{$t_pid}{dbappname}, $cur_info{$t_pid}{dbclient}, $cur_info{$t_pid}{sqlstate}, ); } } elsif ($cur_info{$t_pid}{loglevel} eq 'LOG') { # Stores global statistics $overall_stat{'queries_number'}++; $overall_stat{'queries_duration'} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); my $cur_last_log_timestamp = "$cur_info{$t_pid}{year}-$cur_info{$t_pid}{month}-$cur_info{$t_pid}{day} " . 
"$cur_info{$t_pid}{hour}:$cur_info{$t_pid}{min}:$cur_info{$t_pid}{sec}"; if (!$overall_stat{'first_query_ts'} || ($overall_stat{'first_query_ts'} gt $cur_last_log_timestamp)) { $overall_stat{'first_query_ts'} = $cur_last_log_timestamp; } if (!$overall_stat{'last_query_ts'} || ($overall_stat{'last_query_ts'} lt $cur_last_log_timestamp)) { $overall_stat{'last_query_ts'} = $cur_last_log_timestamp; } $overall_stat{'peak'}{$cur_last_log_timestamp}{query}++; if ($graph) { $per_minute_info{"$cur_day_str"}{"$cur_hour_str"}{$cur_info{$t_pid}{min}}{query}{count}++; $per_minute_info{"$cur_day_str"}{"$cur_hour_str"}{$cur_info{$t_pid}{min}}{query}{second}{$cur_info{$t_pid}{sec}}++; # Store min / max duration if ($cur_info{$t_pid}{duration}) { $per_minute_info{"$cur_day_str"}{"$cur_hour_str"}{$cur_info{$t_pid}{min}}{query}{duration} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); if (!exists $per_minute_info{"$cur_day_str"}{"$cur_hour_str"}{$cur_info{$t_pid}{min}}{query}{min} || ($per_minute_info{"$cur_day_str"}{"$cur_hour_str"}{$cur_info{$t_pid}{min}}{query}{min} > $cur_info{$t_pid}{duration})) { $per_minute_info{"$cur_day_str"}{"$cur_hour_str"}{$cur_info{$t_pid}{min}}{query}{min} = $cur_info{$t_pid}{duration}; } if (!exists $per_minute_info{"$cur_day_str"}{"$cur_hour_str"}{$cur_info{$t_pid}{min}}{query}{max} || ($per_minute_info{"$cur_day_str"}{"$cur_hour_str"}{$cur_info{$t_pid}{min}}{query}{max} < $cur_info{$t_pid}{duration})) { $per_minute_info{"$cur_day_str"}{"$cur_hour_str"}{$cur_info{$t_pid}{min}}{query}{max} = $cur_info{$t_pid}{duration}; } } } # Counter per database and application name if ($cur_info{$t_pid}{dbname}) { $database_info{$cur_info{$t_pid}{dbname}}{count}++; $database_info{$cur_info{$t_pid}{dbname}}{duration} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); } else { $database_info{unknown}{count}++; $database_info{unknown}{duration} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); } if ($cur_info{$t_pid}{dbappname}) { $application_info{$cur_info{$t_pid}{dbappname}}{count}++; $application_info{$cur_info{$t_pid}{dbappname}}{duration} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); } else { $application_info{unknown}{count}++; $application_info{unknown}{duration} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); } if ($cur_info{$t_pid}{dbuser}) { $user_info{$cur_info{$t_pid}{dbuser}}{count}++; $user_info{$cur_info{$t_pid}{dbuser}}{duration} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); } else { $user_info{unknown}{count}++; $user_info{unknown}{duration} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); } if ($cur_info{$t_pid}{dbclient}) { $host_info{$cur_info{$t_pid}{dbclient}}{count}++; $host_info{$cur_info{$t_pid}{dbclient}}{duration} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); } else { $host_info{unknown}{count}++; $host_info{unknown}{duration} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); } if ($cur_info{$t_pid}{query}) { # Add a semi-colon at end of the query $cur_info{$t_pid}{query} .= ';' if ($cur_info{$t_pid}{query} !~ /;\s*$/s); # Normalize query my $normalized = &normalize_query($cur_info{$t_pid}{query}); my $action = uc($1); if ($normalized =~ $action_regex) { $action = uc($1); # If this is a copy statement try to find if this is a write or read statement if (($action eq 'COPY') && (($normalized =~ /FROM\s+STDIN/i) || ($normalized =~ /FROM\s+'[^']+'/is))) { $action = 'COPY FROM'; } elsif ($action eq 'COPY') { $action = 
'COPY TO'; } elsif ($action eq 'WITH') { $action = 'CTE'; } elsif ($action eq 'CREATE' || $action eq 'DROP' || $action eq 'ALTER' || $action eq 'TRUNCATE') { $action = 'DDL'; } elsif ($action eq 'BEGIN' || $action eq 'COMMIT' || $action eq 'ROLLBACK' || $action eq 'START' || $action eq 'END' || $action eq 'SAVEPOINT') { $action = 'TCL'; } } else { $action = 'OTHERS'; } $overall_stat{$action}++; if ($action eq 'SELECT') { $overall_stat{'peak'}{$cur_last_log_timestamp}{select}++; } elsif ($action ne 'OTHERS') { $overall_stat{'peak'}{$cur_last_log_timestamp}{write}++; } $per_minute_info{"$cur_day_str"}{"$cur_hour_str"}{$cur_info{$t_pid}{min}}{$action}{count}++; $per_minute_info{"$cur_day_str"}{"$cur_hour_str"}{$cur_info{$t_pid}{min}}{$action}{second}{$cur_info{$t_pid}{sec}}++; $per_minute_info{"$cur_day_str"}{"$cur_hour_str"}{$cur_info{$t_pid}{min}}{$action}{duration} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); if ($cur_info{$t_pid}{dbname}) { $database_info{$cur_info{$t_pid}{dbname}}{$action}++; $database_info{$cur_info{$t_pid}{dbname}}{"$action|duration"} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); } else { $database_info{unknown}{$action}++; $database_info{unknown}{"$action|duration"} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); } if ($cur_info{$t_pid}{dbappname}) { $application_info{$cur_info{$t_pid}{dbappname}}{$action}++; $application_info{$cur_info{$t_pid}{dbappname}}{"$action|duration"} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); } else { $application_info{unknown}{$action}++; $application_info{unknown}{"$action|duration"} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); } if ($cur_info{$t_pid}{dbuser}) { $user_info{$cur_info{$t_pid}{dbuser}}{$action}++; $user_info{$cur_info{$t_pid}{dbuser}}{"$action|duration"} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); } else { $user_info{unknown}{$action}++; $user_info{unknown}{"$action|duration"} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); } if ($cur_info{$t_pid}{dbclient}) { $host_info{$cur_info{$t_pid}{dbclient}}{$action}++; $host_info{$cur_info{$t_pid}{dbclient}}{"$action|duration"} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); } else { $host_info{unknown}{$action}++; $host_info{unknown}{"$action|duration"} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration}); } # Store normalized query count $normalyzed_info{$normalized}{count}++; # Store normalized query count and duration per time $normalyzed_info{$normalized}{chronos}{"$cur_day_str"}{"$cur_hour_str"}{count}++; $normalyzed_info{$normalized}{chronos}{"$cur_day_str"}{"$cur_hour_str"}{min}{$cur_info{$t_pid}{min}}++; if ($cur_info{$t_pid}{duration}) { # Update top slowest queries statistics &set_top_slowest($cur_info{$t_pid}{query}, $cur_info{$t_pid}{duration}, $cur_last_log_timestamp, $cur_info{$t_pid}{dbname}, $cur_info{$t_pid}{dbuser}, $cur_info{$t_pid}{dbclient},$cur_info{$t_pid}{dbappname}, $cur_info{$t_pid}{bind}, $cur_info{$t_pid}{plan}); # Store normalized query total duration $normalyzed_info{$normalized}{duration} += $cur_info{$t_pid}{duration}; $normalyzed_info{$normalized}{chronos}{"$cur_day_str"}{"$cur_hour_str"}{min_duration}{$cur_info{$t_pid}{min}} += $cur_info{$t_pid}{duration}; # Store min / max duration if (!exists $normalyzed_info{$normalized}{min} || ($normalyzed_info{$normalized}{min} > $cur_info{$t_pid}{duration})) { $normalyzed_info{$normalized}{min} = $cur_info{$t_pid}{duration}; } if (!exists 
$normalyzed_info{$normalized}{max} || ($normalyzed_info{$normalized}{max} < $cur_info{$t_pid}{duration})) { $normalyzed_info{$normalized}{max} = $cur_info{$t_pid}{duration}; } # Stores query/user information if ($cur_info{$t_pid}{dbuser}) { $normalyzed_info{$normalized}{users}{$cur_info{$t_pid}{dbuser}}{duration} += $cur_info{$t_pid}{duration}; $normalyzed_info{$normalized}{users}{$cur_info{$t_pid}{dbuser}}{count}++; } # Stores query/app information if ($cur_info{$t_pid}{dbappname}) { $normalyzed_info{$normalized}{apps}{$cur_info{$t_pid}{dbappname}}{duration} += $cur_info{$t_pid}{duration}; $normalyzed_info{$normalized}{apps}{$cur_info{$t_pid}{dbappname}}{count}++; } # Store normalized query count and duration per time $normalyzed_info{$normalized}{chronos}{"$cur_day_str"}{"$cur_hour_str"}{duration} += $cur_info{$t_pid}{duration}; # Store normalized query samples &set_top_sample($normalized, $cur_info{$t_pid}{query}, $cur_info{$t_pid}{duration}, $cur_last_log_timestamp, $cur_info{$t_pid}{dbname}, $cur_info{$t_pid}{dbuser}, $cur_info{$t_pid}{dbclient},$cur_info{$t_pid}{dbappname}, $cur_info{$t_pid}{bind}, $cur_info{$t_pid}{plan}); } } } return 1; } sub store_temporary_and_lock_infos { my $t_pid = shift; return if (!$t_pid); # Store normalized query temp file size if required if (exists $cur_temp_info{$t_pid} && ($cur_temp_info{$t_pid}{query} ne '') && $cur_temp_info{$t_pid}{size}) { # Add a semi-colon at end of the query $cur_temp_info{$t_pid}{query} .= ';' if ($cur_temp_info{$t_pid}{query} !~ /;\s*$/s); # Anonymize query if requested by the user if ($anonymize) { $cur_temp_info{$t_pid}{query} = &anonymize_query($cur_temp_info{$t_pid}{query}); } # Normalize query my $normalized = &normalize_query($cur_temp_info{$t_pid}{query}); $normalyzed_info{$normalized}{tempfiles}{size} += $cur_temp_info{$t_pid}{size}; $normalyzed_info{$normalized}{tempfiles}{count}++; if ($normalyzed_info{$normalized}{tempfiles}{maxsize} < $cur_temp_info{$t_pid}{size}) { $normalyzed_info{$normalized}{tempfiles}{maxsize} = $cur_temp_info{$t_pid}{size}; } if (!exists($normalyzed_info{$normalized}{tempfiles}{minsize}) || $normalyzed_info{$normalized}{tempfiles}{minsize} > $cur_temp_info{$t_pid}{size}) { $normalyzed_info{$normalized}{tempfiles}{minsize} = $cur_temp_info{$t_pid}{size}; } &set_top_tempfile_info($cur_temp_info{$t_pid}{query}, $cur_temp_info{$t_pid}{size}, $cur_temp_info{$t_pid}{timestamp}, $cur_temp_info{$t_pid}{dbname}, $cur_temp_info{$t_pid}{dbuser}, $cur_temp_info{$t_pid}{dbclient}, $cur_temp_info{$t_pid}{dbappname}); # Check if we don't have a sample for this query (occurs when log_min_duration_statement didn't log the query) if (!exists $normalyzed_info{$normalized}{samples}) { &set_top_sample($normalized, $cur_temp_info{$t_pid}{query}, $cur_temp_info{$t_pid}{duration}, $cur_temp_info{$t_pid}{timestamp}, $cur_temp_info{$t_pid}{dbname}, $cur_temp_info{$t_pid}{dbuser}, $cur_temp_info{$t_pid}{dbclient},$cur_temp_info{$t_pid}{dbappname}); } delete $cur_temp_info{$t_pid}; } # Store normalized query that waited the most if required if (exists $cur_lock_info{$t_pid}{wait} && ($cur_lock_info{$t_pid}{query} ne '')) { # Add a semi-colon at end of the query $cur_lock_info{$t_pid}{query} .= ';' if ($cur_lock_info{$t_pid}{query} !~ /;\s*$/s); # Anonymize query if requested by the user if ($anonymize) { $cur_lock_info{$t_pid}{query} = &anonymize_query($cur_lock_info{$t_pid}{query}); } # Normalize query my $normalized = &normalize_query($cur_lock_info{$t_pid}{query}); $normalyzed_info{$normalized}{locks}{wait} += 
$cur_lock_info{$t_pid}{wait}; $normalyzed_info{$normalized}{locks}{count}++; if ($normalyzed_info{$normalized}{locks}{maxwait} < $cur_lock_info{$t_pid}{wait}) { $normalyzed_info{$normalized}{locks}{maxwait} = $cur_lock_info{$t_pid}{wait}; } if (!exists($normalyzed_info{$normalized}{locks}{minwait}) || $normalyzed_info{$normalized}{locks}{minwait} > $cur_lock_info{$t_pid}{wait}) { $normalyzed_info{$normalized}{locks}{minwait} = $cur_lock_info{$t_pid}{wait}; } &set_top_locked_info($cur_lock_info{$t_pid}{query}, $cur_lock_info{$t_pid}{wait}, $cur_lock_info{$t_pid}{timestamp}, $cur_lock_info{$t_pid}{dbname}, $cur_lock_info{$t_pid}{dbuser}, $cur_lock_info{$t_pid}{dbclient}, $cur_lock_info{$t_pid}{dbappname}); # Check if we don't have sample for this query (occurs when log_min_duration_statement doesn't logged the query) if (!exists $normalyzed_info{$normalized}{samples}) { &set_top_sample($normalized, $cur_lock_info{$t_pid}{query}, $cur_lock_info{$t_pid}{duration}, $cur_lock_info{$t_pid}{timestamp}, $cur_lock_info{$t_pid}{dbname}, $cur_lock_info{$t_pid}{dbuser}, $cur_lock_info{$t_pid}{dbclient},$cur_lock_info{$t_pid}{dbappname}); } delete $cur_lock_info{$t_pid}; } # Store normalized query temp file size if required if (exists $cur_cancel_info{$t_pid} && ($cur_cancel_info{$t_pid}{query} ne '')) { # Add a semi-colon at end of the query $cur_cancel_info{$t_pid}{query} .= ';' if ($cur_cancel_info{$t_pid}{query} !~ /;\s*$/s); # Anonymize query if requested by the user if ($anonymize) { $cur_cancel_info{$t_pid}{query} = &anonymize_query($cur_cancel_info{$t_pid}{query}); } # Normalize query my $normalized = &normalize_query($cur_cancel_info{$t_pid}{query}); $normalyzed_info{$normalized}{cancelled}{count}++; &set_top_cancelled_info($cur_cancel_info{$t_pid}{query}, $cur_cancel_info{$t_pid}{count}, $cur_cancel_info{$t_pid}{timestamp}, $cur_cancel_info{$t_pid}{dbname}, $cur_cancel_info{$t_pid}{dbuser}, $cur_cancel_info{$t_pid}{dbclient}, $cur_cancel_info{$t_pid}{dbappname}); delete $cur_cancel_info{$t_pid}; } } # Normalize error messages sub normalize_error { my $orig_query = shift; return if (!$orig_query); # Remove character position $orig_query =~ s/ at character \d+.*//s; # Remove encoding detail $orig_query =~ s/(byte sequence for encoding).*/$1/; # Replace changing parameter by ... $orig_query =~ s/"[^"]*"/"..."/g; $orig_query =~ s/\(.*\)/\(...\)/g; $orig_query =~ s/column .* does not exist/column "..." does not exist/; $orig_query =~ s/(database system was.* at (?:log time )?).*/$1 .../; $orig_query =~ s/[0-9A-F]{24}/.../g; # Remove WAL filename $orig_query =~ s/, u_uuid: [^,]+, file_path:.*/. Retrying/; $orig_query =~ s/( for)? PID \d+//; $orig_query =~ s/ TIMELINE \d+/ TIMELINE n/; $orig_query =~ s/ [0-9A-F]+\/[0-9A-F]+ / x\/x /; $orig_query =~ s/ BASE_BACKUP LABEL '[^']*'.*/ BASE_BACKUP LABEL '...'/; $orig_query =~ s/(transaction|relation|database|process) \d+/$1 .../g; $orig_query =~ s/after \d+\.\d+/after .../; $orig_query =~ s/cannot drop ([^\s]+) [^\s]+ /cannot drop $1 ... /; $orig_query =~ s/\[[^\]]+\]/.../g; $orig_query =~ s/(character with byte sequence) .* (in encoding)/$1 ... $2/; $orig_query =~ s/(invalid input syntax for [^:]+:).*/$1 .../; $orig_query =~ s/character (.*?) of encoding/character ... of encoding/; $orig_query =~ s/(unterminated quoted string at or near) "[^"]+"/$1 "..."/; $orig_query =~ s/on page \d+ of relation/on page ... 
of relation/; $orig_query =~ s/(side location conflict at ).*/$1 .../; $orig_query =~ s/(permission denied for [^\s]+) .*/$1 .../; # Need more normalization stuff here return $orig_query; } # Normalize pgbouncer error messages sub pgb_normalize_error { my $orig_query = shift; return if (!$orig_query); # Replace changing parameter by ... $orig_query =~ s/"[^"]*"/"..."/g; $orig_query =~ s/'[^']*'/'...'/g; $orig_query =~ s/\(.*\)/\(...\)/g; # Need more normalization stuff here return $orig_query; } sub average_per_minutes { my $val = shift; my $idx = shift; my @lavgs = (); for (my $i = 0 ; $i < 60 ; $i += $idx) { push(@lavgs, sprintf("%02d", $i)); } for (my $i = 0 ; $i <= $#lavgs ; $i++) { if ($val == $lavgs[$i]) { return "$lavgs[$i]"; } elsif ($i == $#lavgs) { return "$lavgs[$i]"; } elsif (($val > $lavgs[$i]) && ($val < $lavgs[$i + 1])) { return "$lavgs[$i]"; } } return $val; } sub autodetect_format { my $file = shift; # a file must be passed return if (!$file); # Open log file for reading my $nfound = 0; my $nline = 0; my $fmt = ''; my %ident_name = (); my $fltf; if (!$remote_host) { localdie("FATAL: when looking for log file format, can't open file $file, $!\n") unless(open(TESTFILE, $file)); $fltf = <TESTFILE>; close(TESTFILE); } # is file in binary format ? if ( $fltf =~ /^pst\d/ ) { $fmt = 'binary'; } else { # try to detect syslogs, stderr, csv or pgbouncer format my ($tfile, $totalsize) = &get_log_file($file, $remote_host); while (my $line = <$tfile>) { chomp($line); $line =~ s/\r//; next if (!$line); $nline++; my ($f, $i) = search_log_format($line); $nfound++ if ($f); $fmt = $f; $ident_name{$i}++ if ($i); last if (($nfound > 10) || ($nline > 5000)); } $tfile->close(); } # When --pgbouncer-only is used force the format if (!$format && $pgbouncer_only) { $pgbouncer_only = 1; $fmt = 'pgbouncer'; } elsif (!$format) { if (!$fmt || ($nfound < 10)) { localdie("FATAL: unable to detect log file format from $file, please use -f option.\n"); } } if (($fmt =~ /syslog/) && !$ident && (scalar keys %ident_name == 1)) { $ident = (keys %ident_name)[0]; } &logmsg('DEBUG', "Autodetected log format '$fmt' from $file"); return $fmt; } sub search_log_format { my $line = shift; my $fmt = ''; my $ident_name = ''; # Are pgbouncer syslog lines ? if ($line =~ /^[A-Z][a-z]{2}\s+\d+ \d+:\d+:\d+(?:\s[^\s]+)?\s[^\s]+\s([^\s\[]+)\[\d+\]: (.\-0x[0-9a-f\.]*|Stats):/) { localdie("FATAL: parsing pgbouncer log from syslog is not supported.\n"); } elsif ($line =~ /^\d+-\d+-\d+T\d+:\d+:\d+(?:.[^\s]+)?\s[^\s]+\s(?:[^\s]+\s)?(?:[^\s]+\s)?([^\s\[]+)\[\d+\]: (.\-0x[0-9a-f\.]*|Stats):/ ) { localdie("FATAL: parsing pgbouncer log from syslog is not supported.\n"); # Are syslog lines ? } elsif ($line =~ /^[A-Z][a-z]{2}\s+\d+\s\d+:\d+:\d+(?:\s[^\s]+)?\s[^\s]+\s([^\s\[]+)\[\d+\]:(?:\s\[[^\]]+\])?\s\[\d+\-\d+\].*?(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT):/ ) { $fmt = 'syslog'; $ident_name = $1; } elsif ($line =~ /^\d+-\d+-\d+T\d+:\d+:\d+(?:.[^\s]+)?\s[^\s]+\s(?:[^\s]+\s)?(?:[^\s]+\s)?([^\s\[]+)\[\d+\]:(?:\s\[[^\]]+\])?\s\[\d+\-\d+\].*?(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT):/ ) { $fmt = 'syslog2'; $ident_name = $1; # Are csv lines ? } elsif ( ( $line =~ /^\d+-\d+-\d+ \d+:\d+:\d+\.\d+(?: [A-Z\+\-\d]{3,6})?,.*,(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT),/ ) && ($line =~ tr/,/,/ >= 12) ) { $fmt = 'csv'; # Are default stderr lines since 10.0 ? } elsif ($line =~ /(\d{10}\.\d{3}|\d+-\d+-\d+ \d+:\d+:\d+)[\.0-9]*(?: [A-Z\+\-\d]{3,6})? 
\[(\d+)\] (LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT):\s+/ ) { $fmt = 'default'; # Are stderr lines ? } elsif ($line =~ /(\d{10}\.\d{3}|\d+-\d+-\d+ \d+:\d+:\d+)[\.0-9]*(?: [A-Z\+\-\d]{3,6})?(.*?)(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT):\s+/ ) { $fmt = 'stderr'; # Are pgbouncer lines ? } elsif ($line =~ /^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\.\d+(?: [A-Z\+\-\d]{3,6})? (\d+) (LOG|ERROR) (.\-0x[0-9a-f\.]*|Stats):/) { $fmt = 'pgbouncer'; # If we just have one single pgbouncer file, force pgbouncer_only to 1 $pgbouncer_only = 1 if ($#log_files == 0); } return ($fmt, $ident_name); } sub progress_bar { my ($got, $total, $width, $char, $queries, $errors) = @_; $width ||= 25; $char ||= '='; my $num_width = length $total; my $nchars = (($width - 1) * $got / $total); $nchars = ($width - 1) if ($nchars >= $width); if ($extension eq 'tsung') { sprintf( "[%-${width}s] Parsed %${num_width}s bytes of %s (%.2f%%), queries: %d\r", $char x $nchars . '>', $got, $total, 100 * $got / +$total, ($queries || $overall_stat{'queries_number'}) ); } elsif ($format eq 'binary') { my $file = $_[-1]; sprintf( "Loaded %d queries and %d events, reading binary file %s...\r", $overall_stat{'queries_number'}, ($overall_stat{'errors_number'}+$pgb_overall_stat{'errors_number'}), $file ); } else { sprintf( "[%-${width}s] Parsed %${num_width}s bytes of %s (%.2f%%), queries: %d, events: %d\r", $char x $nchars . '>', $got, $total, 100 * $got / +$total, ($queries || $overall_stat{'queries_number'}), ($errors || ($overall_stat{'errors_number'} + $pgb_overall_stat{'errors_number'})) ); } } sub jqplot_linegraph { my ($buttonid, $divid, $data1, $data2, $data3, $title, $ytitle, $legend1, $legend2, $legend3, $ytitle2, $data4, $legend4) = @_; if (!$data1) { return qq{

    $title

    NO DATASET
    }; } my $options_series = ''; if ($legend1) { $options_series .= "{ label: \"$legend1\", color: \"#6e9dc9\" },"; } if ($legend2) { $options_series .= "{ label: \"$legend2\", color: \"#f4ab3a\" },"; } if ($legend3) { $options_series .= "{ label: \"$legend3\", color: \"#ac7fa8\" },"; } if ($legend4) { if ($ytitle2) { $options_series .= "{ label: \"$legend4\", color: \"#8dbd0f\", yaxis: 'y2axis'},"; } else { $options_series .= "{ label: \"$legend4\", color: \"#8dbd0f\" },"; } } $options_series =~ s/,$//; my $dateTracker_dataopts = ''; if ($data1) { $data1 = "var ${divid}_${buttonid}_d1 = [$data1];"; $dateTracker_dataopts .= "${divid}_${buttonid}_d1,"; } if ($data2) { $data2 = "var ${divid}_${buttonid}_d2 = [$data2];"; $dateTracker_dataopts .= "${divid}_${buttonid}_d2,"; } if ($data3) { $data3 = "var ${divid}_${buttonid}_d3 = [$data3];"; $dateTracker_dataopts .= "${divid}_${buttonid}_d3,"; } if ($data4) { $data4 = "var ${divid}_${buttonid}_d4 = [$data4];"; $dateTracker_dataopts .= "${divid}_${buttonid}_d4,"; } $dateTracker_dataopts =~ s/,$//; return < EOF } sub jqplot_piegraph { my ($buttonid, $divid, $title, %data) = @_; if (scalar keys %data == 0) { return qq{

    $title

    NO DATASET
    }; } my $datadef = ''; foreach my $k (sort keys %data) { $datadef .= "['$k', $data{$k}],"; } $datadef =~ s/,$//; return < EOF } sub jqplot_histograph { my ($buttonid, $divid, $data1, $data2, $legend1, $legend2) = @_; if (!$data1) { return qq{
    NO DATASET
    }; } $legend1 ||= 'Queries'; my $y2decl = ''; my $y2vals = ''; if ($data2) { $legend2 ||= 'Avg. duration'; $y2decl = "var lines_${buttonid} = [$data2];"; $y2vals = ", lines_${buttonid}"; } my $title = ''; return < EOF } sub jqplot_duration_histograph { my ($buttonid, $divid, $legend, $range, %data) = @_; if (scalar keys %data == 0) { return qq{
    NO DATASET
    }; } $legend ||= 'Queries'; my $bars = ''; for (my $i = 1; $i <= $#{$range}; $i++) { my $k = "$range->[$i-1]-$range->[$i]ms"; my $lbl = "'" . &convert_time($range->[$i-1]) . '-' . &convert_time($range->[$i]) . "'"; $bars .= "[ $lbl, $data{$k}],"; } my $k = "> $range->[-1]ms"; $bars .= "[ '> " . &convert_time($range->[-1]) . "', $data{$k}]"; my $title = ''; return < EOF } sub build_log_line_prefix_regex { my $llp = shift; my %regex_map = ( '%a' => [('t_appname', '(.*)')], # application name '%u' => [('t_dbuser', '([0-9a-zA-Z\_\[\]\-\.]*)')], # user name '%d' => [('t_dbname', '([0-9a-zA-Z\_\[\]\-\.]*)')], # database name '%r' => [('t_hostport', '([a-zA-Z0-9\-\.]+|\[local\]|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[0-9a-fA-F:]+)?[\(\d\)]*')], # remote host and port '%h' => [('t_client', '([a-zA-Z0-9\-\.]+|\[local\]|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[0-9a-fA-F:]+)?')], # remote host '%p' => [('t_pid', '(\d+)')], # process ID '%n' => [('t_epoch', '(\d{10}\.\d{3})')], # timestamp as Unix epoch '%t' => [('t_timestamp', '(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})(?: [A-Z\+\-\d]{3,6})?')], # timestamp without milliseconds '%m' => [('t_mtimestamp', '(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\.\d+(?: [A-Z\+\-\d]{3,6})?')], # timestamp with milliseconds '%l' => [('t_session_line', '(\d+)')], # session line number '%s' => [('t_session_timestamp', '(\d{4}-\d{2}-\d{2} \d{2}):\d{2}:\d{2}(?: [A-Z\+\-\d]{3,6})?')], # session start timestamp '%c' => [('t_session_id', '([0-9a-f\.]*)')], # session ID '%v' => [('t_virtual_xid', '([0-9a-f\.\/]*)')], # virtual transaction ID '%x' => [('t_xid', '([0-9a-f\.\/]*)')], # transaction ID '%i' => [('t_command', '([0-9a-zA-Z\.\-\_\s]*)')], # command tag '%e' => [('t_sqlstate', '([0-9a-zA-Z]+)')], # SQL state ); my @param_list = (); $llp =~ s/([\[\]\|\(\)\{\}])/\\$1/g; $llp =~ s/\%l([^\d])\d+/\%l$1\\d\+/; $llp =~ s/\%q//; while ($llp =~ s/(\%[audrhpntmlscvxie])/$regex_map{"$1"}->[1]/) { push(@param_list, $regex_map{"$1"}->[0]); } # replace %% by a single % $llp =~ s/\%\%/\%/; # t_session_id (%c) can naturaly replace pid as unique session id # when it is given in log_line_prefix and pid is not present. $use_sessionid_as_pid = 1 if ( grep(/t_session_id/, @param_list) && !grep(/t_pid/, @param_list) ); # Check regex in log line prefix from command line &check_regex($llp, '--prefix'); return ($llp, @param_list); } # Inclusion of Perl package SQL::Beautify # Copyright (C) 2009 by Jonas Kramer # Published under the terms of the Artistic License 2.0. { package SQL::Beautify; use strict; use warnings; our $VERSION = 0.04; use Carp; # Keywords from SQL-92, SQL-99, SQL-2003, SQL-2008 and SQL-2011 specifics keywords. 
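# Before the keyword list, a minimal usage sketch of this embedded beautifier
# (hypothetical caller holding a query string in $sql; option names are the
# defaults initialized in new() below):
#
#   my $beautifier = SQL::Beautify->new(spaces => 4, uc_keywords => 2);
#   $beautifier->query($sql);             # set the SQL to beautify
#   my $pretty = $beautifier->beautify(); # indented, line-broken SQL text
#
# With uc_keywords => 2 known keywords are upper-cased; 1 lower-cases them and
# 3 capitalizes only the first letter (see _add_token() below).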
use constant KEYWORDS => qw( ABSOLUTE ACTION ADD AFTER ALL ALLOCATE ALTER AND ANY ARE ARRAY AS ASC ASENSITIVE ASSERTION ASYMMETRIC AT ATOMIC AUTHORIZATION AVG BEFORE BEGIN BETWEEN BIGINT BINARY BIT BIT_LENGTH BLOB BOOLEAN BOTH BREADTH BY CALL CALLED CASCADE CASCADED CASE CAST CATALOG CHAR CHARACTER CHARACTER_LENGTH CHAR_LENGTH CHECK CLOB CLOSE COALESCE COLLATE COLLATION COLUMN COMMIT CONDITION CONNECT CONNECTION CONSTRAINT CONSTRAINTS CONSTRUCTOR CONTAINS CONTINUE CONVERT CORRESPONDING COUNT CREATE CROSS CUBE CURRENT CURRENT_DATE CURRENT_DEFAULT_TRANSFORM_GROUP CURRENT_CATALOG CURRENT_PATH CURRENT_ROLE CURRENT_SCHEMA CURRENT_TIME CURRENT_TIMESTAMP CURRENT_TRANSFORM_GROUP_FOR_TYPE CURRENT_USER CURSOR CYCLE DATA DATE DAY DEALLOCATE DEC DECIMAL DECLARE DEFAULT DEFERRABLE DEFERRED DELETE DEPTH DEREF DESC DESCRIBE DESCRIPTOR DETERMINISTIC DIAGNOSTICS DISCONNECT DISTINCT DO DOMAIN DOUBLE DROP DYNAMIC EACH ELEMENT ELSE ELSEIF END EPOCH EQUALS ESCAPE EXCEPT EXCEPTION EXEC EXECUTE EXISTS EXIT EXTERNAL EXTRACT FALSE FETCH FILTER FIRST FLOAT FOR FOREIGN FOUND FREE FROM FULL FUNCTION GENERAL GET GLOBAL GO GOTO GRANT GROUP GROUPING HANDLER HAVING HOLD HOUR IDENTITY IF IMMEDIATE IN INDICATOR INITIALLY INNER INOUT INPUT INSENSITIVE INSERT INT INTEGER INTERSECT INTERVAL INTO IS ISOLATION ITERATE JOIN KEY LANGUAGE LARGE LAST LATERAL LEADING LEAVE LEFT LEVEL LIKE LIMIT LOCAL LOCALTIME LOCALTIMESTAMP LOCATOR LOOP LOWER MAP MATCH MAX MEMBER MERGE METHOD MIN MINUTE MODIFIES MODULE MONTH MULTISET NAMES NATIONAL NATURAL NCHAR NCLOB NEW NEXT NO NONE NOT NULL NULLIF NUMERIC OBJECT OCTET_LENGTH OF OFFSET OLD ON ONLY OPEN OPTION OR ORDER ORDINALITY OUT OUTER OUTPUT OVER OVERLAPS PAD PARAMETER PARTIAL PARTITION PATH POSITION PRECISION PREPARE PRESERVE PRIMARY PRIOR PRIVILEGES PROCEDURE PUBLIC RANGE READ READS REAL RECURSIVE REF REFERENCES REFERENCING RELATIVE RELEASE REPEAT RESIGNAL RESTRICT RESULT RETURN RETURNS REVOKE RIGHT ROLE ROLLBACK ROLLUP ROUTINE ROW ROWS SAVEPOINT SCHEMA SCOPE SCROLL SEARCH SECOND SECTION SELECT SENSITIVE SESSION SESSION_USER SET SETS SIGNAL SIMILAR SIZE SMALLINT SOME SPACE SPECIFIC SPECIFICTYPE SQL SQLCODE SQLERROR SQLEXCEPTION SQLSTATE SQLWARNING START STATE STATIC SUBMULTISET SUBSTRING SUM SYMMETRIC SYSTEM SYSTEM_USER TABLE TABLESAMPLE TEMPORARY TEXT THEN TIME TIMESTAMP TIMEZONE_HOUR TIMEZONE_MINUTE TINYINT TO TRAILING TRANSACTION TRANSLATE TRANSLATION TREAT TRIGGER TRIM TRUE UNDER UNDO UNION UNIQUE UNKNOWN UNNEST UNTIL UPDATE UPPER USAGE USER USING VALUE VALUES VARCHAR VARYING VIEW WHEN WHENEVER WHERE WHILE WINDOW WITH WITHIN WITHOUT WORK WRITE YEAR ZONE ); use constant FUNCTIONS => qw(); sub tokenize_sql { my ($query, $remove_white_tokens) = @_; my $re = qr{ ( (?:--)[\ \t\S]* # single line comments | (?:\-\|\-) # range operator "is adjacent to" | (?:\->>|\->|\#>>|\#>|\?\&|\?) # Json Operators | (?:\#<=|\#>=|\#<>|\#<|\#=) # compares tinterval and reltime | (?:>>=|<<=) # inet operators | (?:!!|\@\@\@) # deprecated factorial and full text search operators | (?:\|\|\/|\|\/) # square root and cube root | (?:\@\-\@|\@\@|\#\#|<\->|<<\||\|>>|\&<\||\&<|\|\&>|\&>|<\^|>\^|\?\#|\#|\?<\||\?\-\||\?\-|\?\|\||\?\||\@>|<\@|\~=) # Geometric Operators | (?:~<=~|~>=~|~>~|~<~) # string comparison for pattern matching operator families | (?:!~~|!~~\*|~~\*|~~) # LIKE operators | (?:!~\*|!~|~\*) # regular expression operators | (?:\*=|\*<>|\*<=|\*>=|\*<|\*>) # composite type comparison operators | (?:<>|<=>|>=|<=|==|!=|=|!|<<|>>|<|>|\|\||\||&&|&|-|\+|\*(?!/)|/(?!\*)|\%|~|\^|\?) 
# operators and tests | [\[\]\(\),;.] # punctuation (parenthesis, comma) | E\'\'(?!\') # escape empty single quoted string | \'\'(?!\') # empty single quoted string | \"\"(?!\"") # empty double quoted string | "(?>(?:(?>[^"\\]+)|""|\\.)*)+" # anything inside double quotes, ungreedy | `(?>(?:(?>[^`\\]+)|``|\\.)*)+` # anything inside backticks quotes, ungreedy | E'(?>(?:(?>[^'\\]+)|''|\\.)*)+' # anything escaped inside single quotes, ungreedy. | '(?>(?:(?>[^'\\]+)|''|\\.)*)+' # anything inside single quotes, ungreedy. | /\*[\ \t\r\n\S]*?\*/ # C style comments | (?:[\w:@]+(?:\.(?:\w+|\*)?)*) # words, standard named placeholders, db.table.*, db.* | (?:\$\w+\$) | (?: \$_\$ | \$\d+ | \${1,2} | \$\w+\$ ) # dollar expressions - eg $_$ $3 $$ $BODY$ | \n # newline | [\t\ ]+ # any kind of white spaces ) }smx; my @query = (); @query = $query =~ m{$re}smxg; if ($remove_white_tokens) { @query = grep(!/^[\s\n\r]*$/, @query); } return wantarray ? @query : \@query; } sub new { my ($class, %options) = @_; my $self = bless {%options}, $class; # Set some defaults. $self->{query} = '' unless defined($self->{query}); $self->{spaces} = 4 unless defined($self->{spaces}); $self->{space} = ' ' unless defined($self->{space}); $self->{break} = "\n" unless defined($self->{break}); $self->{break} = ' ' unless ($self->{spaces} != 0); $self->{wrap} = {} unless defined($self->{wrap}); $self->{keywords} = [] unless defined($self->{keywords}); $self->{functions} = [] unless defined($self->{functions}); $self->{rules} = {} unless defined($self->{rules}); $self->{uc_keywords} = 0 unless defined($self->{uc_keywords}); $self->{uc_functions}= 0 unless defined($self->{uc_functions}); $self->{no_comments} = 0 unless defined($self->{no_comments}); push @{$self->{keywords}}, KEYWORDS; push @{$self->{functions}}, FUNCTIONS; # Initialize internal stuff. $self->{_level} = 0; @{$self->{have_from_clause}} = qw( extract overlay substring trim ); return $self; } # Add more SQL. sub add { my ($self, $addendum) = @_; $addendum =~ s/^\s*/ /; $self->{query} .= $addendum; } # Set SQL to beautify. sub query { my ($self, $query) = @_; $self->{query} = $query if (defined($query)); return $self->{query}; } # Beautify SQL. sub beautify { my ($self) = @_; $self->{_output} = ''; $self->{_level_stack} = []; $self->{_new_line} = 1; $self->{ '_is_in_create' } = 0; my $last; $self->{_tokens} = [tokenize_sql($self->query, 1)]; while (defined(my $token = $self->_token)) { my $rule = $self->_get_rule($token); if ($token =~ /^CREATE$/i) { $self->{ '_is_in_create' } = 1; } elsif ($token =~ /^(AS|IS|RETURN)$/) { $self->{ '_is_in_create' } = 0; } # Allow custom rules to override defaults. 
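# A hypothetical sketch of the custom rule mechanism used just below: a rule
# name is a dash-separated list of actions (break, over, back, token, push,
# pop, reset) that _process_rule() executes in order for a matching token,
# and add_rule() registers the tokens a rule applies to. For example:
#
#   $beautifier->add_rule('break-token-over', 'EXCEPTION');
#   # => start a new line, emit EXCEPTION, then indent what follows it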
if ($rule) { $self->_process_rule($rule, $token); } elsif ($token eq '(') { $self->{ '_is_in_create' }++ if ($self->{ '_is_in_create' }); $self->_add_token($token); if ((uc($last) eq 'AS') || ($self->{ '_is_in_create' } == 2)) { $self->_new_line; } if (!$self->{'_is_in_function'} && $last && grep(/^\Q$last\E$/i, @KEYWORDS2)) { $self->{'_is_in_function'} = 1; } elsif ($self->{'_is_in_function'}) { $self->{'_is_in_function'}++; } if ( ($self->_next_token ne ')') && ($self->_next_token ne '*') ) { $self->{ '_has_from' } = 1 if ($last && grep(/^\Q$last\E$/i, @{$self->{have_from_clause}})); push @{$self->{_level_stack}}, $self->{_level}; $self->_over unless $last and uc($last) eq 'WHERE'; } } elsif ($token eq ')') { $self->{ '_is_in_create' }-- if ($self->{ '_is_in_create' }); $self->{ '_has_from' } = 0; if ($self->{ '_is_in_function' }) { $self->{ '_is_in_function' }--; } $self->_new_line if ($self->_next_token =~ /^SELECT$/i); if ( ($last ne '(') && ($last ne '*') ) { $self->{_level} = pop(@{$self->{_level_stack}}) || 0; } $self->_add_token($token); if ($self->{ '_is_in_create' } <= 1) { my $next_tok = quotemeta($self->_next_token); $self->_new_line if ($self->_next_token and $self->_next_token !~ /^AS$/i and $self->_next_token ne ')' and $self->_next_token !~ /::/ and $self->_next_token ne ';' and $self->_next_token ne ',' and !exists $SYMBOLS{$next_tok} ); } } elsif ($token eq ',') { $self->_add_token($token); $self->_new_line if (!$self->{ 'no_break' } && !$self->{ '_is_in_function' } && $self->_next_token !~ /^('|\-\-)/ && !$self->{ '_is_in_where' }); } elsif ($token eq ';') { $self->{ '_has_from' } = 0; $self->{ '_is_in_where' } = 0; $self->{ '_is_in_from' } = 0; $self->{ '_is_an_update' } = 0; $self->{ '_is_in_create' } = 0; $self->_add_token($token); $self->{break} = "\n" unless ($self->{spaces} != 0); $self->_new_line; # End of statement; remove all indentation. 
@{$self->{_level_stack}} = (); $self->{_level} = 0; $self->{break} = ' ' unless ($self->{spaces} != 0); } elsif ($token =~ /^(?:SELECT|UPDATE|FROM|WHERE|HAVING|BEGIN|SET|RETURNING|VALUES)$/i) { $self->{ 'no_break' } = 0; if (($token =~ /^FROM$/i) && $self->{ '_has_from' } ) { $self->{ '_has_from' } = 0; $self->_new_line; $self->_add_token( $token ); $self->_new_line; } else { # if we're not in a sub-select, make sure these always are # at the far left (col 1) $self->_back if ( $last and $last ne '(' and uc($last) ne 'FOR' and uc($last) ne 'KEY' ); $self->_new_line if ( $last and uc($last) ne 'FOR' and uc($last) ne 'KEY' ); $self->_add_token( $token ); if ( $token !~ /^SET$/i || $self->{ '_is_an_update' } ) { $self->_new_line if ($self->_next_token and $self->_next_token ne '(' and $self->_next_token ne ';' ); } $self->_over; } if ($token =~ /^UPDATE$/i and !$last) { $self->{ '_is_an_update' } = 1; } if ($token =~ /^WHERE$/i) { $self->{ '_is_in_where' } = 1; $self->{ 'is_in_from' } = 0; } else { $self->{ '_is_in_where' } = 0; if ($token =~ /^FROM$/i) { $self->{ 'is_in_from' } = 1; } else { $self->{ 'is_in_from' } = 0; } } } elsif ($token =~ /^(?:GROUP|ORDER|LIMIT)$/i) { $self->_back; $self->_new_line; $self->_add_token($token); $self->{ '_is_in_where' } = 0; $self->{ '_is_in_from' } = 0; } elsif ($token =~ /^(?:BY)$/i) { $self->_add_token($token); $self->_new_line; $self->_over; } elsif ($token =~ /^(?:CASE)$/i) { $self->_add_token($token); $self->_over; } elsif ($token =~ /^(?:WHEN)$/i) { $self->_new_line; $self->_add_token($token); } elsif ($token =~ /^(?:ELSE)$/i) { $self->_new_line; $self->_add_token($token); } elsif ($token =~ /^(?:END)$/i) { $self->_back; $self->_new_line; $self->_add_token($token); } elsif ($token =~ /^(?:UNION|INTERSECT|EXCEPT)$/i) { $self->{ 'no_break' } = 0; $self->_back unless $last and $last eq '('; $self->_new_line; $self->_add_token($token); $self->_new_line if ($self->_next_token and $self->_next_token ne '(' and $self->_next_token !~ /^ALL$/i); $self->_over; } elsif ($token =~ /^(?:LEFT|RIGHT|INNER|OUTER|CROSS|NATURAL)$/i) { $self->{ 'no_break' } = 0; $self->_back unless $last and $last eq ')'; if ($token =~ /(?:LEFT|RIGHT|CROSS|NATURAL)$/i) { $self->_new_line; $self->_over if ($self->{_level} == 0); } if ( ($token =~ /(?:INNER|OUTER)$/i) && ($last !~ /(?:LEFT|RIGHT|CROSS|NATURAL)$/i) ) { $self->_new_line; $self->_over if ($self->{_level} == 0); } $self->_add_token($token); } elsif ($token =~ /^(?:JOIN)$/i) { $self->{ 'no_break' } = 0; if (!$last or $last !~ /^(?:LEFT|RIGHT|INNER|OUTER|CROSS|NATURAL)$/i) { $self->_new_line; } $self->_add_token($token); if ( $last && $last =~ /^(?:INNER|OUTER)$/i ) { $self->_over; } } elsif ($token =~ /^(?:AND|OR)$/i) { $self->{ 'no_break' } = 0; if (!$last or ($last !~ /^(?:CREATE)$/i) ) { $self->_new_line; } $self->_add_token($token); } elsif ($token =~ /^--/) { if (!$self->{no_comments}) { $self->_add_token($token); $self->{break} = "\n" unless ($self->{spaces} != 0); $self->_new_line; $self->{break} = ' ' unless ($self->{spaces} != 0); } } elsif ($token =~ /^\/\*.*\*\/$/s) { if (!$self->{no_comments}) { $token =~ s/\n[\s\t]+\*/\n\*/gs; $self->_new_line; $self->_add_token($token); $self->{break} = "\n" unless ($self->{spaces} != 0); $self->_new_line; $self->{break} = " " unless ($self->{spaces} != 0); } } elsif ($token =~ /^USING$/i) { if (!$self->{ 'is_in_from' }) { $self->_new_line; } else { # USING from join clause disable line break $self->{ 'no_break' } = 1; } $self->_add_token($token); } else { 
$self->_add_token($token, $last); } $last = $token; } $self->_new_line; $self->{_output}; } # Add a token to the beautified string. sub _add_token { my ($self, $token, $last_token) = @_; if ($self->{wrap}) { my $wrap; if ($self->_is_keyword($token)) { $wrap = $self->{wrap}->{keywords}; } elsif ($self->_is_constant($token)) { $wrap = $self->{wrap}->{constants}; } if ($wrap) { $token = $wrap->[0] . $token . $wrap->[1]; } } my $last_is_dot = defined($last_token) && $last_token eq '.'; if (!$self->_is_punctuation($token) and !$last_is_dot) { my $sp = $self->_indent; if ( (!defined($last_token) || $last_token ne '(') && $token ne ')' && ($token !~ /^::/) ) { $self->{ '_output' } .= $sp if ($token ne ')' && defined($last_token) && $last_token ne '::' && ($token ne '(' || !$self->_is_function( $last_token )) ); $self->{ '_output' } .= $sp if (!defined($last_token) && $token); } elsif ( $self->{ '_is_in_create' } == 2 && defined($last_token)) { $self->{ '_output' } .= $sp if ($last_token ne '::' and ($last_token ne '(' || !$self->{ '_is_in_index' })); } $token =~ s/\n/\n$sp/gs; } # uppercase keywords if ($self->{uc_keywords} && $self->_is_keyword($token)) { $token = lc($token) if ($self->{uc_keywords} == 1); $token = uc($token) if ($self->{uc_keywords} == 2); $token = ucfirst(lc($token)) if ($self->{uc_keywords} == 3); } # uppercase functions if ($self->{uc_functions} && (my $fct = $self->_is_function($token))) { $token =~ s/$fct/\L$fct\E/i if ($self->{uc_functions} == 1); $token =~ s/$fct/\U$fct\E/i if ($self->{uc_functions} == 2); $fct = ucfirst(lc($fct)); $token =~ s/$fct/$fct/i if ($self->{uc_functions} == 3); } $self->{_output} .= $token; $self->{_output} =~ s/\(\s+\(/\(\(/gs; # This can't be the beginning of a new line anymore. $self->{_new_line} = 0; } # Increase the indentation level. sub _over { my ($self) = @_; ++$self->{_level}; } # Decrease the indentation level. sub _back { my ($self) = @_; --$self->{_level} if ($self->{_level} > 0); } # Return a string of spaces according to the current indentation level and the # spaces setting for indenting. sub _indent { my ($self) = @_; if ($self->{_new_line}) { return $self->{space} x ($self->{spaces} * $self->{_level}); } else { return $self->{space}; } } # Add a line break, but make sure there are no empty lines. sub _new_line { my ($self) = @_; $self->{_output} .= $self->{break} unless ($self->{_new_line}); $self->{_new_line} = 1; } # Have a look at the token that's coming up next. sub _next_token { my ($self) = @_; return @{$self->{_tokens}} ? $self->{_tokens}->[0] : undef; } # Get the next token, removing it from the list of remaining tokens. sub _token { my ($self) = @_; return shift @{$self->{_tokens}}; } # Check if a token is a known SQL keyword. sub _is_keyword { my ($self, $token) = @_; return ~~ grep {$_ eq uc($token)} @{$self->{keywords}}; } # Check if a token is a known SQL function. sub _is_function { my ($self, $token) = @_; my @ret = grep($token =~ /\b[\.]*$_$/i, @{$self->{functions}}); return $ret[0]; } # Add new keywords to highlight. sub add_keywords { my $self = shift; for my $keyword (@_) { push @{$self->{keywords}}, ref($keyword) ? @{$keyword} : $keyword; } } # Add new functions to highlight. sub add_functions { my $self = shift; for my $function (@_) { push @{$self->{functions}}, ref($function) ? @{$function} : $function; } } # Add new rules. sub add_rule { my ($self, $format, $token) = @_; my $rules = $self->{rules} ||= {}; my $group = $rules->{$format} ||= []; push @{$group}, ref($token) ? 
@{$token} : $token; } # Find custom rule for a token. sub _get_rule { my ($self, $token) = @_; values %{$self->{rules}}; # Reset iterator. while (my ($rule, $list) = each %{$self->{rules}}) { return $rule if (grep {uc($token) eq uc($_)} @$list); } return; } sub _process_rule { my ($self, $rule, $token) = @_; my $format = { break => sub {$self->_new_line}, over => sub {$self->_over}, back => sub {$self->_back}, token => sub {$self->_add_token($token)}, push => sub {push @{$self->{_level_stack}}, $self->{_level}}, pop => sub {$self->{_level} = pop(@{$self->{_level_stack}}) || 0}, reset => sub {$self->{_level} = 0; @{$self->{_level_stack}} = ();}, }; for (split /-/, lc $rule) { &{$format->{$_}} if ($format->{$_}); } } # Check if a token is a constant. sub _is_constant { my ($self, $token) = @_; return ($token =~ /^\d+$/ or $token =~ /^(['"`]).*\1$/); } # Check if a token is punctuation. sub _is_punctuation { my ($self, $token) = @_; return ($token =~ /^[,;.]$/); } } sub get_log_file { my $logf = shift; my $sample_only = shift; my $lfile = undef; chomp($logf); # get file size my $totalsize = 0; if ( $journalctl_cmd && ($logf =~ m/\Q$journalctl_cmd\E/) ) { $totalsize = 0; } elsif (!$remote_host) { $totalsize = (stat("$logf"))[7] || 0 if ($logf ne '-'); } elsif ($logf !~ /\.(gz|bz2|zip|xz)$/i) { &logmsg('DEBUG', "Looking for file size using command: $ssh_command \"ls -l $logf\" | awk '{print \$5}'"); $totalsize = `$ssh_command "ls -l $logf" | awk '{print \$5}'`; chomp($totalsize); if ($totalsize eq '') { localdie("FATAL: can't get size of remote file, please check what's going wrong with command: $ssh_command \"ls -l $logf\" | awk '{print \$5}'\n"); } &logmsg('DEBUG', "Remote file size: $totalsize"); if (!$totalsize) { return $totalsize; } } my $iscompressed = 1; # Open a file handle if ( $journalctl_cmd && ($logf =~ m/\Q$journalctl_cmd\E/) ) { # For journalctl command we need to use a pipe as file handle if (!$remote_host) { open($lfile, "$logf |") || localdie("FATAL: cannot read output of commanf: $logf. $!\n"); } else { if (!$sample_only) { &logmsg('DEBUG', "Retrieving log entries using command: $ssh_command \"$logf\" |"); # Open a pipe to remote journalctl program open($lfile,"$ssh_command \"$logf\" |") || localdie("FATAL: cannot read from pipe to $ssh_command \"$logf\". $!\n"); } else { &logmsg('DEBUG', "Retrieving log entries using command: $ssh_command \"$logf -n 100\" |"); # Open a pipe to remote journalctl program open($lfile,"$ssh_command \"$logf -n 100\" |") || localdie("FATAL: cannot read from pipe to $ssh_command \"$logf -n 100\". $!\n"); } } $iscompressed = 0; } elsif ($logf !~ /\.(gz|bz2|zip|xz)$/i) { if (!$remote_host) { open($lfile, $logf) || localdie("FATAL: cannot read log file $logf. $!\n"); } else { if (!$sample_only) { &logmsg('DEBUG', "Retrieving log entries using command: $ssh_command \" cat $logf\" |"); # Open a pipe to cat program open($lfile,"$ssh_command \"cat $logf\" |") || localdie("FATAL: cannot read from pipe to $ssh_command \"cat $logf\". $!\n"); } else { # Open a pipe to cat program open($lfile,"$ssh_command \"tail -n 100 $logf\" |") || localdie("FATAL: cannot read from pipe to $ssh_command \"tail -n 100 $logf\". 
$!\n"); } } $totalsize = 0 if ($logf eq '-'); $iscompressed = 0; } else { my $uncompress = $zcat; my $sample_cmd = 'zgrep'; if (($logf =~ /\.bz2/i) && ($zcat =~ /^$zcat_cmd$/)) { $uncompress = $bzcat; $sample_cmd = 'bzgrep'; } elsif (($logf =~ /\.zip/i) && ($zcat =~ /^$zcat_cmd$/)) { $uncompress = $ucat; } elsif (($logf =~ /\.xz/i) && ($zcat =~ /^$zcat_cmd$/)) { $uncompress = $xzcat; $sample_cmd = 'xzgrep'; } if (!$remote_host) { &logmsg('DEBUG', "Compressed log file, will use command: $uncompress \"$logf\""); # Open a pipe to zcat program for compressed log open($lfile,"$uncompress \"$logf\" |") || localdie("FATAL: cannot read from pipe to $uncompress \"$logf\". $!\n"); } else { if (!$sample_only) { &logmsg('DEBUG', "Compressed log file, will use command: $ssh_command \"$uncompress $logf\""); # Open a pipe to zcat program for compressed log open($lfile,"$ssh_command \"$uncompress $logf\" |") || localdie("FATAL: cannot read from pipe to $ssh_command \"$uncompress $logf\". $!\n"); } else { &logmsg('DEBUG', "Compressed log file, will use command: $ssh_command \"$uncompress $logf\""); # Open a pipe to zcat program for compressed log open($lfile,"$ssh_command \"$sample_cmd -m 100 '[1234567890]' $logf\" |") || localdie("FATAL: cannot read from pipe to $ssh_command \"$sample_cmd -m 100 '' $logf\". $!\n"); } } # Real size of the file is unknown, try to find it # bz2 does not report real size $totalsize = 0; if ($logf =~ /\.(gz|zip|xz)$/i) { my $cmd_file_size = $gzip_uncompress_size; if ($logf =~ /\.zip$/i) { $cmd_file_size = $zip_uncompress_size; } elsif ($logf =~ /\.xz$/i) { $cmd_file_size = $xz_uncompress_size; } $cmd_file_size =~ s/\%f/$logf/g; if (!$remote_host) { &logmsg('DEBUG', "Looking for file size using command: $cmd_file_size"); $totalsize = `$cmd_file_size`; } else { &logmsg('DEBUG', "Looking for remote file size using command: $ssh_command $cmd_file_size"); $totalsize = `$ssh_command $cmd_file_size`; } chomp($totalsize); } $queue_size = 0; } # In list context returns the filehandle and the size of the file if (wantarray()) { return ($lfile, $totalsize, $iscompressed); } # In scalar context return size only close($lfile); return $totalsize; } sub split_logfile { my $logf = shift; my $saved_pos = shift; # CSV file can't be parsed using multiprocessing return (0, -1) if ( $format eq 'csv' ); # get file size my $totalsize = (stat("$logf"))[7] || 0; # Real size of a compressed file is unknown, try to find it # bz2 does not report real size if ($logf =~ /\.(gz|zip|xz)$/i) { $totalsize = 0; my $cmd_file_size = $gzip_uncompress_size; if ($logf =~ /\.zip$/i) { $cmd_file_size = $zip_uncompress_size; } elsif ($logf =~ /\.xz$/i) { $cmd_file_size = $xz_uncompress_size; } $cmd_file_size =~ s/\%f/$logf/g; $totalsize = `$cmd_file_size`; chomp($totalsize); return (0, $totalsize) if ($totalsize); } elsif ($logf =~ /\.bz2$/i) { $totalsize = 0; } # Only uncompressed file can be splitted return (0, -1) if (!$totalsize); my @chunks = (0); # Seek to the last saved position if ($last_parsed && $saved_pos) { if ($saved_pos < $totalsize) { $chunks[0] = $saved_pos; } } # With small files splitting is inefficient if ($totalsize <= 16777216) { return ($chunks[0], $totalsize); } my $i = 1; my ($lfile, $null) = &get_log_file($logf); # Get file handle to the file while ($i < $queue_size) { my $pos = int(($totalsize/$queue_size) * $i); if ($pos > $chunks[0]) { $lfile->seek($pos, 0); #Move the offset to the BEGINNING of each line, because the logic in process_file requires so $pos= $pos + length(<$lfile>) - 1; 
push(@chunks, $pos) if ($pos < $totalsize); } last if ($pos >= $totalsize); $i++; } $lfile->close(); push(@chunks, $totalsize); return @chunks; } # Return the week number of the year for a given date sub get_week_number { my ($year, $month, $day) = @_; # %U The week number of the current year as a decimal number, range 00 to 53, starting with the first # Sunday as the first day of week 01. # %V The ISO 8601 week number (see NOTES) of the current year as a decimal number, range 01 to 53, # where week 1 is the first week that has at least 4 days in the new year. # %W The week number of the current year as a decimal number, range 00 to 53, starting with the first # Monday as the first day of week 01. # Check if the date is valid first my $datefmt = POSIX::strftime("%Y-%m-%d", 1, 1, 1, $day, $month - 1, $year - 1900); if ($datefmt ne "$year-$month-$day") { return -1; } my $weekNumber = ''; if (!$week_start_monday) { $weekNumber = POSIX::strftime("%U", 1, 1, 1, $day, $month - 1, $year - 1900); } else { $weekNumber = POSIX::strftime("%W", 1, 1, 1, $day, $month - 1, $year - 1900); } return sprintf("%02d", $weekNumber+1); } # Returns day number of the week of a given days sub get_day_of_week { my ($year, $month, $day) = @_; # %w The day of the week as a decimal, range 0 to 6, Sunday being 0. my $weekDay = ''; if (!$week_start_monday) { # Start on sunday = 0 $weekDay = POSIX::strftime("%w", 1,1,1,$day,--$month,$year-1900); } else { # Start on monday = 1 $weekDay = POSIX::strftime("%u", 1,1,1,$day,--$month,$year-1900); $weekDay--; } return $weekDay; } # Returns all days following the week number sub get_wdays_per_month { my $wn = shift; my ($year, $month) = split(/\-/, shift); my @months = (); my @retdays = (); $month ||= '01'; push(@months, "$year$month"); my $start_month = $month; if ($month eq '01') { unshift(@months, ($year - 1) . "12"); } else { unshift(@months, $year . sprintf("%02d", $month - 1)); } if ($month == 12) { push(@months, ($year+1) . "01"); } else { push(@months, $year . sprintf("%02d", $month + 1)); } foreach my $d (@months) { $d =~ /^(\d{4})(\d{2})$/; my $y = $1; my $m = $2; foreach my $day ("01" .. "31") { # Check if the date is valid first my $datefmt = POSIX::strftime("%Y-%m-%d", 1, 1, 1, $day, $m - 1, $y - 1900); if ($datefmt ne "$y-$m-$day") { next; } my $weekNumber = ''; if (!$week_start_monday) { $weekNumber = POSIX::strftime("%U", 1, 1, 1, $day, $m - 1, $y - 1900); } else { $weekNumber = POSIX::strftime("%W", 1, 1, 1, $day, $m - 1, $y - 1900); } if ( ($weekNumber == $wn) || ( ($weekNumber eq '00') && (($wn == 1) || ($wn >= 52)) ) ) { push(@retdays, "$year-$m-$day"); return @retdays if ($#retdays == 6); } next if ($weekNumber > $wn); } } return @retdays; } sub IsLeapYear { return ((($_[0] & 3) == 0) && (($_[0] % 100 != 0) || ($_[0] % 400 == 0))); } #### # Display calendar #### sub get_calendar { my ($year, $month) = @_; my $str = "\n"; my @wday = qw(Su Mo Tu We Th Fr Sa); my @std_day = qw(Su Mo Tu We Th Fr Sa); if ($week_start_monday) { @wday = qw(Mo Tu We Th Fr Sa Su); @std_day = qw(Mo Tu We Th Fr Sa Su); } my %day_lbl = (); for (my $i = 0; $i <= $#wday; $i++) { $day_lbl{$wday[$i]} = $wday[$i]; } $str .= ""; map { $str .= ''; } @wday; $str .= "\n\n"; my @currow = ('','','','','','',''); my $d = ''; my $wd = 0; my $wn = 0; my $week = ''; for $d ("01" .. 
"31") { last if (($d == 31) && grep(/^$month$/, '04','06','09','11')); last if (($d == 30) && ($month eq '02')); last if (($d == 29) && ($month eq '02') && !&IsLeapYear($year)); $wd = &get_day_of_week($year,$month,$d); $wn = &get_week_number($year,$month,$d); next if ($wn == -1); if ( !-e "$outdir/$year/$month/$d/index.html" ) { $currow[$wd] = ""; } else { $currow[$wd] = ""; } if ($wd == 6) { $week = sprintf("%02d", $wn); if (-e "$outdir/$year/week-$week/index.html") { $week = ""; } else { $week = ""; } map { $_ = "" if ($_ eq ''); } @currow; $str .= "$week" . join('', @currow) . "\n"; @currow = ('','','','','','',''); } } if ( ($wd != 6) || ($currow[0] ne '') ) { $week = sprintf("%02d", $wn); if (-e "$outdir/$year/week-$week/index.html") { $week = ""; } else { $week = ""; } map { $_ = "" if ($_ eq ''); } @currow; $str .= "$week" . join('', @currow) . "\n"; @currow = ('','','','','','',''); } $str .= "\n
     ' . $day_lbl{$_} . '
    $d$d$week $week 
    $week $week 
    \n"; my %month_name = ( '01' => 'January', '02' => 'February', '03' => 'March', '04' => 'April', '05' => 'May', '06' => 'June', '07' => 'July', '08' => 'August', '09' => 'September', '10' => 'October', '11' => 'November', '12' => 'December' ); return qq{
     

    $month_name{$month}

    $str
    }; } sub _gethostbyaddr { my $ip = shift; my $host = undef; unless(exists $CACHE_DNS{$ip}) { eval { local $SIG{ALRM} = sub { die "DNS lookup timeout.\n"; }; alarm($DNSLookupTimeout); $host = gethostbyaddr(inet_aton($ip), AF_INET); alarm(0); }; if ($@) { $CACHE_DNS{$ip} = undef; #printf "_gethostbyaddr timeout : %s\n", $ip; } else { $CACHE_DNS{$ip} = $host; #printf "_gethostbyaddr success : %s (%s)\n", $ip, $host; } } return $CACHE_DNS{$ip} || $ip; } sub localdie { my $msg = shift; print STDERR "$msg"; unlink("$PID_DIR/pgbadger.pid"); exit 1; } sub skip_unwanted_line { # Skip unwanted lines if ($#exclude_time >= 0) { my $found = 0; foreach (@exclude_time) { if ($prefix_vars{'t_timestamp'} =~ /$_/) { $found = 1; last; } } return 1 if ($found); } return 1 if ($from && ($from gt $prefix_vars{'t_timestamp'})); if ($to && ($to lt $prefix_vars{'t_timestamp'})) { return -1; } return 0; } __DATA__ WRFILE: jquery.jqplot.min.css .jqplot-target{position:relative;color:#666;font-family:"Trebuchet MS",Arial,Helvetica,sans-serif;font-size:1em}.jqplot-axis{font-size:.75em}.jqplot-xaxis{margin-top:10px}.jqplot-x2axis{margin-bottom:10px}.jqplot-yaxis{margin-right:10px}.jqplot-y2axis,.jqplot-y3axis,.jqplot-y4axis,.jqplot-y5axis,.jqplot-y6axis,.jqplot-y7axis,.jqplot-y8axis,.jqplot-y9axis,.jqplot-yMidAxis{margin-left:10px;margin-right:10px}.jqplot-axis-tick,.jqplot-xaxis-tick,.jqplot-yaxis-tick,.jqplot-x2axis-tick,.jqplot-y2axis-tick,.jqplot-y3axis-tick,.jqplot-y4axis-tick,.jqplot-y5axis-tick,.jqplot-y6axis-tick,.jqplot-y7axis-tick,.jqplot-y8axis-tick,.jqplot-y9axis-tick,.jqplot-yMidAxis-tick{position:absolute;white-space:pre}.jqplot-xaxis-tick{top:0;left:15px;vertical-align:top}.jqplot-x2axis-tick{bottom:0;left:15px;vertical-align:bottom}.jqplot-yaxis-tick{right:0;top:15px;text-align:right}.jqplot-yaxis-tick.jqplot-breakTick{right:-20px;margin-right:0;padding:1px 5px 1px 5px;z-index:2;font-size:1.5em}.jqplot-y2axis-tick,.jqplot-y3axis-tick,.jqplot-y4axis-tick,.jqplot-y5axis-tick,.jqplot-y6axis-tick,.jqplot-y7axis-tick,.jqplot-y8axis-tick,.jqplot-y9axis-tick{left:0;top:15px;text-align:left}.jqplot-yMidAxis-tick{text-align:center;white-space:nowrap}.jqplot-xaxis-label{margin-top:10px;font-size:11pt;position:absolute}.jqplot-x2axis-label{margin-bottom:10px;font-size:11pt;position:absolute}.jqplot-yaxis-label{margin-right:10px;font-size:11pt;position:absolute}.jqplot-yMidAxis-label{font-size:11pt;position:absolute}.jqplot-y2axis-label,.jqplot-y3axis-label,.jqplot-y4axis-label,.jqplot-y5axis-label,.jqplot-y6axis-label,.jqplot-y7axis-label,.jqplot-y8axis-label,.jqplot-y9axis-label{font-size:11pt;margin-left:10px;position:absolute}.jqplot-meterGauge-tick{font-size:.75em;color:#999}.jqplot-meterGauge-label{font-size:1em;color:#999}table.jqplot-table-legend{margin-top:12px;margin-bottom:12px;margin-left:12px;margin-right:12px}table.jqplot-table-legend,table.jqplot-cursor-legend{background-color:rgba(255,255,255,0.6);border:1px solid #ccc;position:absolute;font-size:.75em}td.jqplot-table-legend{vertical-align:middle}td.jqplot-seriesToggle:hover,td.jqplot-seriesToggle:active{cursor:pointer}.jqplot-table-legend .jqplot-series-hidden{text-decoration:line-through}div.jqplot-table-legend-swatch-outline{border:1px solid 
#ccc;padding:1px}div.jqplot-table-legend-swatch{width:0;height:0;border-top-width:5px;border-bottom-width:5px;border-left-width:6px;border-right-width:6px;border-top-style:solid;border-bottom-style:solid;border-left-style:solid;border-right-style:solid}.jqplot-title{top:0;left:0;padding-bottom:.5em;font-size:1.2em}table.jqplot-cursor-tooltip{border:1px solid #ccc;font-size:.75em}.jqplot-cursor-tooltip{border:1px solid #ccc;font-size:.75em;white-space:nowrap;background:rgba(208,208,208,0.5);padding:1px}.jqplot-highlighter-tooltip,.jqplot-canvasOverlay-tooltip{border:1px solid #ccc;font-size:.75em;white-space:nowrap;background:rgba(208,208,208,0.5);padding:1px}.jqplot-point-label{font-size:.75em;z-index:2}td.jqplot-cursor-legend-swatch{vertical-align:middle;text-align:center}div.jqplot-cursor-legend-swatch{width:1.2em;height:.7em}.jqplot-error{text-align:center}.jqplot-error-message{position:relative;top:46%;display:inline-block}div.jqplot-bubble-label{font-size:.8em;padding-left:2px;padding-right:2px;color:rgb(20%,20%,20%)}div.jqplot-bubble-label.jqplot-bubble-label-highlight{background:rgba(90%,90%,90%,0.7)}div.jqplot-noData-container{text-align:center;background-color:rgba(96%,96%,96%,0.3)}

WRFILE: jquery.min.js

/*
 * jQuery JavaScript Library v1.9.1
 * http://jquery.com/
 *
 * Includes Sizzle.js
 * http://sizzlejs.com/
 *
 * Copyright 2005, 2012 jQuery Foundation, Inc. and other contributors
 * Released under the MIT license
 * http://jquery.org/license
 *
 * Date: 2013-2-4
 */
/* minified jQuery v1.9.1 library source (embedded resource) */