pgbadger-7.1/.gitignore
# Swap files
*.swp
pgbadger-7.1/.perltidyrc
--backup-and-modify-in-place
--backup-file-extension=beforeTidy
--block-brace-tightness=2
--brace-tightness=2
--closing-token-indentation=1
--continuation-indentation=4
--indent-columns=4
--maximum-line-length=134
--cuddled-else
--opening-sub-brace-on-new-line
--noopening-brace-on-new-line
--nooutdent-labels
--paren-tightness=2
--square-bracket-tightness=2
--vertical-tightness=0
--vertical-tightness-closing=0
--break-at-old-comma-breakpoints
--entab-leading-whitespace=4
--tabs
pgbadger-7.1/CONTRIBUTING.md
# How to contribute #
## Before Submitting an issue ##
1. Upgrade to the latest version of pgBadger and see if the problem remains
2. Look at the [closed issues](https://github.com/dalibo/pgbadger/issues?state=closed), we may have already answered a similar problem
3. [Read the doc](http://dalibo.github.com/pgbadger/). It is short and useful.
pgbadger-7.1/ChangeLog
2015-07-11 version 7.1
This new release fixes some issues and adds a new report:
* Distribution of sessions per application
It also adds Json operators to SQL Beautifier.
Here is the full list of changes/fixes:
- Fix unwanted seek on old parsing position when log entry is stdin.
Thanks to Olivier Schiavo for the report.
- Try to fix a potential issue in log start/end date parsing. Thanks
to gityerhubhere for the report.
- Fix broken queries with multiline in bind parameters. Thanks to
Nicolas Thauvin for the report.
- Add new report Sessions per application. Thanks to Keith Fiske for
the feature request.
- Add Json Operators to SQL Beautifier. Thanks to Tom Burnett and
Hubert depesz Lubaczewski.
- Makefile.PL: changed manpage section from '1' to '1p', fixes #237.
Thanks to Cyril Bouthors for the patch.
- Update Copyright date-range and installation instructions that were
still referring to version 5. Thanks to Steve Crawford for the report.
- Fix typo in changelog
Note that new official releases must now be downloaded from GitHub and no longer
from SourceForge. Download at https://github.com/dalibo/pgbadger/releases
2015-05-08 version 7.0
This major release adds some more useful reports and features.
* New report about events distribution per 5 minutes.
* New per application details (total duration and times executed) for each
query reported in Top Queries reports. The details are visible from a new
button called "App(s) involved".
* Add support for the auto_explain extension. EXPLAIN plans will be added together
with the top slowest queries when available in the log file.
* Add a link to automatically open the explain plan on http://explain.depesz.com/
* New report on cumulated query durations per user.
* New report about the Number of cancelled queries (graph)
* New report about Queries generating the most cancellation (N)
* New report about Queries most cancelled.
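As a minimal sketch for the auto_explain support above (assuming a stock PostgreSQL setup; parameter values are purely illustrative), loading auto_explain and logging plans for slow statements so pgBadger can pick them up could be done in postgresql.conf with:
    shared_preload_libraries = 'auto_explain'
    auto_explain.log_min_duration = '1s'   # log plans of statements slower than 1 second
    auto_explain.log_analyze = off         # on adds run-time statistics but also overhead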
Here is the full list of changes/fixes:
- Update documentation with the latest reports.
- Fix number of event samples displayed in event reports.
- Add new report about events distribution per x minutes.
- Add app=%a default prefix to documentation.
- Add reports of "App(s) involved" with top queries. Thanks to Antti Koivisto
for the feature request.
- Remove newline between a ) and , in the beautifier.
- Add link to automatically open the explain plan on http://explain.depesz.com/
- Add support for auto_explain; EXPLAIN plans will be added together with top
slowest queries when available in the log file.
- Add a graph on distributed duration per user. Thanks to Korriliam for the
patch.
- Add three new reports: Number of cancelled queries (graph), Queries generating
the most cancellation (N) and Queries most cancelled lists. Thanks to Thomas
Reiss for the feature request.
- Fix case where temporary file statement must be retrieved from the previous
LOG statement and not in the following STATEMENT log entry. Thanks to Mael
Rimbault for the report.
- Add --enable-checksum to show an MD5 hash of each reported query. Thanks
to Thomas Reiss for the feature request.
2015-04-13 version 6.4
This new release fixes a major bug in the SQL beautifier which removed operators
and adds some useful improvements in the anonymization of parameter values.
pgBadger will also try to parse the full csvlog when a broken CSV line is
encountered.
- Make anonymization more useful. Thanks to Hubert depesz Lubaczewski
for the patch.
- Fix previous patch for csvlog generated with a PostgreSQL version
before 9.0.
- Try to continue CSV parsing after a broken CSV line. Thanks to Sergey
Burladyan for the patch.
- Fix bug in SQL beautifier which removed operators. Thanks to Thomas
Reiss for the report.
- Fix loop exit, check terminate quickly and correct comments
indentation. Thanks to Sergey Burladyan for the patch
Please upgrade.
2015-03-27 version 6.3
This new release fixes some bugs and adds some new reports:
* New per-user details (total duration and times executed) for each query
reported in Top Queries reports. The details are visible from a new button
called "User(s) involved".
* Add "Average queries per session" and "Average queries duration per session"
in Sessions tab of the Global statistics.
* Add connection time histogram.
* Use bar graph for Histogram of query times and sessions times.
There's also some cool new features and options:
* Add -L | --logfile-list option to read a list of logfiles from an external
file.
* Add support to log_timezones with + and - signs for timestamp with
milliseconds (%m).
* Add --noreport option to instruct pgbadger to not build any HTML reports
in incremental mode. pgBadger will only create binary files.
* Add auto detection of client=%h or remote=%h from the log so that adding
a prefix is not needed when it respects the default of pgbadger.
* Redefine sessions duration histogram bound to be more accurate.
* Add new option -M | --no-multiline to not collect multi-line statements
and avoid storing and reporting garbage when needed.
* Add --log-duration option to force pgbadger to associate log entries
generated by both log_duration=on and log_statement=all.
The pgbadger_tools script has also been improved with new features:
* Add a new tool to pgbadger_tools to output top queries in CSV format for
follow-up analysis.
* Add --explain-time-consuming and --explain-normalized options to generate
explain statement about top time consuming and top normalized slowest
queries.
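For example, the new -L/--logfile-list and --noreport options could be combined in an incremental run roughly like this (a sketch; the list file path and output directory are illustrative):
    pgbadger -I -O /var/www/pgbadger/ --noreport -L /etc/pgbadger/logfiles.txt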
Here is the full list of changes/fixes:
- Update flotr2.min.js to latest github code.
- Add per user detail information (total duration and times executed)
for each query reported in "Time consuming queries", "Most frequent
queries" "and Normalized slowest queries". The details are visible
from a new button called "User(s) involved" near the "Examples"
button. Thanks to Guillaume Le Bihan for the patch and tsn77130 for
the feature request.
- pgbadger_tool: add tool to output top queries to CSV format, for
follow-up analysis. Thanks to briklen for the patch.
- Add geometric operators to SQL beautifier. Thanks to Rodolphe
Quiedeville for the report.
- Fix non-closing session when a process crashes with message:
"terminating connection because of crash of another server process".
Thanks to Mael Rimbault for the report.
- Add -L|--logfile-list command line option to read a list of logfiles
from a file. Thanks to Hubert depesz Lubaczewski for the feature
request.
- Automatically remove %q from prefix. Thanks to mbecroft for the report.
- Do not store DEALLOCATE log entries anymore.
- Fix queries histogram where ranges did not appear in the right order.
Thanks to Grzegorz Garlewicz for the report.
- Fix min yaxis in histogram graph. Thanks to grzeg1 for the patch.
- Add --log-duration command line option to force pgbadger to associate
log entries generated by both log_duration = on and log_statement=all.
Thanks to grzeg1 for the feature request.
- Small typographical corrections. Thanks to Jefferson Queiroz Venerando
and Bill Mitchell for the patches.
- Reformat usage output and add explanation of the --noreport command
line option.
- Fix documentation about minimal pattern in custom log format. Thanks
to Julien Rouhaud for the suggestion.
- Add support to log_timezones with + and - signs to timestamp with
milliseconds (%m). Thanks to jacksonfoz for the patch.
pgbadger did not recognize log files with timezones like 'GMT+3'.
- Add --noreport command line option to instruct pgbadger to not build
any reports in incremental mode. pgBadger will only create binary
files. Thanks to Hubert depesz Lubaczewski for the feature request.
- Add time consuming information in tables of Queries per database...
Thanks to Thomas for the feature request.
- Add more details about the CSV parser error. It now prints the line
number and the last parameter that generated the failure. This should
make it possible to see the malformed log entry.
- Change substitution markup in attempt to fix a new look-behind
assertions error. Thanks to Paolo Cavallini for the report.
- Use bar graph for Histogram of query times and sessions times.
- Fix wrong count of min/max queries per second. Thanks to Guillaume
Lelarge for the report. Add COPY statements to SELECT or INSERT
statistics following the copy direction (stdin or stdout).
- Fix Illegal division by zero at line 3832. Thanks to MarcoTrek for
the report.
- Add "Average queries per session" and "Average queries duration per
session" in Sessions tab of the Global stat. Thanks to Guillaume
Lelarge for the feature request.
- Reformat numbers in pie graph tracker. Thanks to jirihlinka for the
report.
- pgbadger_tools: Add --explain-time-consuming and --explain-normalized
to generate explain statement about top time consuming and top
normalized slowest queries. Thanks to Josh Kupershmidt for the feature
request.
- Remove everything but error information from the json output when -w |
--watch-mode is enabled. Thanks to jason.
- Fix undefined subroutine encode_json when using -x json. Thanks to
jason for the report.
- Add auto detection of client=%h or remote=%h from the log so that
adding a prefix is not needed when it respects the default of pgbadger.
- Redefine sessions duration histogram bound to be more accurate. Thanks
to Guillaume Lelarge for the report.
- Add connection time histogram. Thanks to Guillaume Lelarge for the
feature request.
- Add new option -M | --no-multiline to not collect multi-line statements
to avoid garbage especially on errors that generate a huge report.
- Do not return SUCCESS error code 0 when aborted or something fails.
Thanks to Bruno Almeida for the patch.
2014-10-07 version 6.2
This is a maintenance release to fix a regression in SQL traffic graphs and
fix some other minor issues.
The release also adds a new option -D or --dns-resolv to map client IP addresses
to FQDNs without having log_hostname enabled in the postgresql configuration.
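For instance (the log file path is illustrative, and reverse DNS lookups can slow parsing considerably), a run resolving client IPs could look like:
    pgbadger -D -o report.html /var/log/postgresql/postgresql.log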
- Do not display queries in Slowest individual, Time consuming and
Normalized slowest queries reports when there is no duration in
log file. Display NO DATASET instead.
- Fix min/max queries in SQL traffic that was based on duration instead
of query count.
- Fix wrong unit to Synced files in Checkpoints files report. Thanks
to Levente Birta for the report.
- Enable allow_loose_quotes in Text::CSV_XS call to fix CSV parsing
error when fields have quote inside an unquoted field. Thanks to
Josh Berkus for the report.
- Add -D | --dns-resolv command line option to replace ip addresses
by their DNS name. Be warned that this can slow down pgBagder a lot.
Thanks to Jiri Hlinka for the feature request.
2014-09-25 version 6.1
This release fixes some issues and adds some new features. It adds a new option
-B or --bar-graph to use bar instead of line in graphs. It will also keep tick
formatting when zooming.
The release also adds a new program: pgbadger_tools to demonstrate how to
work with pgBadger binary files to build your own new feature. The first
tool, 'explain-slowest', allows printing of top slowest queries as EXPLAIN
statements. There are also additional options to execute automatically the
statements with EXPLAIN ANALYZE and get the execution plan. See help of the
program for more information or the README file in the tools directory.
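As an illustration (the binary file path is hypothetical and depends on where your incremental reports are stored), printing the slowest queries as EXPLAIN statements could look like:
    pgbadger_tools --explain-slowest /var/www/pgbadger/2014/week-39/*.bin
Adding the --analyze option mentioned below should turn these into EXPLAIN (ANALYZE, VERBOSE, BUFFERS) statements.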
Some modifications will change certain behavior:
- The -T | --title text value will now be displayed instead of the
pgBadger label right after the logo. It was previously displayed
on mouse over the pgBadger label.
Here is the full list of changes/fixes:
- Change -T | --title position on pgBadger report. Title now overrides
the pgBadger label. Thanks to Julien Rouhaud for the patch.
- Add --file-per-query and --format-query option to write each slowest
query in a separate file named qryXXX.sql and perform minimal formatting
of the queries. Thanks to Rodolphe Quiedeville for the patch.
- Remove debug query from explain-slowest tool.
- Fix surge in sessions number report when an exclusion or inclusion
option (dbname, user, appname, etc.) is used. Thanks to suyah for the
report.
- Fix fatal error when remote log file is 0 size. Thanks to Julien
Rouhaud for the report.
- Allow pgbadger_tools --explain-slowest to automatically execute the
EXPLAIN statements and report the plan. See pgbadger_tools --help for
more explanation.
- Add --analyze option to replace EXPLAIN statements by EXPLAIN
(ANALYZE, VERBOSE, BUFFERS).
- Move pgbadger_tools program and README.tools into the tools/
subdirectory with removing the extension. Add more comments and
explanations.
- Fix case where pgbadger dies when an interrupt signal is received while using the -e
option. Thanks to Lloyd Albin for the report.
- Add a new program pgbadger_tools to demonstrate how to deal with
pgBadger binary files to build your own new feature. The first one
'explain-slowest' allow printing of top slowest queries as EXPLAIN
statements.
- Keep tick formatting when zooming. Thanks to Julien Rouhaud for the
patch.
- Fix automatic detection of rsyslogd logs. Thanks to David Day for
the report.
- Fix issue in calculating min/max/avg in "General Activity" report. It
was built on the sum of query durations per minute instead of each
duration. Thanks to Jayadevan M for the report.
- The same issue remains with percentiles that are built using the sum of
durations per minute and don't represent the real query durations.
- This commit also includes a modification to the convert_time() method to
report milliseconds.
- Add -B or --bar-graph command line option to use bar instead of line
in graph. Thanks to Bart Dopheide for the suggestion.
- Fix Checkpoint Wal files usage graph title.
2014-08-08 version 6.0
This new major release adds some new features like automatic cleanup of binary
files in incremental mode or maximum number of weeks for reports retention.
It improves the incremental mode by allowing the use of multiprocessing with
multiple log files.
It also adds a report of query latency percentiles on the general activity table
(percentiles are 90, 95, 99).
There's also a new output format: JSON. This format is good for sharing data
with other languages, which makes it easy to integrate pgBadger's result into
other monitoring tools.
You may want to expose your reports but not the data: using the --anonymize
option, pgBadger will be able to anonymize all literal values in the queries.
Sometimes selecting a query in the report to copy it could be a pain. There's now
a click-to-select button in front of each query that allows you to just use
Ctrl+C to copy it to the clipboard.
The use of the new -X option also allows pgBadger to write out extra files to
the outdir when creating incremental reports. Those files are the CSS and
Javascript code normally repeated in each HTML file.
Warning: the behavior of pgBadger in incremental mode has changed. It will now
always clean up the output directory of all obsolete binary files. If you were
using those files to build your own reports, you can prevent pgBadger from removing
them by using the --noclean option. Note that if you use the retention feature,
all those files in obsolete directories will be removed too.
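As a rough sketch (paths and the retention value are illustrative), a weekly incremental run that keeps twelve weeks of reports and writes the shared CSS/Javascript as extra files could be invoked like this:
    pgbadger -I -X -R 12 -O /var/www/pgbadger/ /var/log/postgresql/postgresql.log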
Here is the complete list of changes.
- Javascript improvement to use only one call of sql_select and
sql_format. Use jQuery selector instead of getElementById to
avoid js errors when not found. Thanks to Julien Rouhaud for the
patches.
- Add -R | --retention command line option to set the maximum number of
week reports to preserve in the output directory for incremental mode.
Thanks to Kong Man for the feature request.
- Session count is immediately decreased when a FATAL error is received
in the current session to prevent overcount of simultaneous session
number. Thanks to Josh Berkus for the report.
- Fix issue in incremental mode when parsing is stopped after rotating
log and rotated log has new lines. The new file was not parsed at all.
Thanks to CZAirwolf for the report.
- Fix revert to single thread when last_line_parsed exists. Thanks to
Bruno Almeida for the report.
- Fix issue in handling SIGTERM/SIGINT that cause pgbadger to continue.
- Add autoclean feature to pgbadger in incremental mode. pgbadger will
now automatically remove obsolete binary files unless you specify
--noclean at command line.
- Add new command line option --anonymize to obscure all literals in
queries/errors to hide confidential data. Thanks to wmorancfi for the
feature request.
- Fix single "SELECT;" as a query in a report. Thanks to Marc Cousin for
the report.
- Add a copy icon in front of each query in the report to select the
entire query. Thanks to Josh Berkus for the feature request.
- Fix wrong move to beginning of a file if the file was modified after
having been parsed once. Thanks to Herve Werner for the report.
- Allow pgBadger to write out extra files to outdir when creating
incremental reports. Require the use of the -X or --extra-files option
in incremental mode. Thanks to Matthew Musgrove for the feature request.
- Fix incomplete handling of XZ compressed format.
- Fix move to offset in incremental mode with multiprocess and incomplete
condition when file is smaller than the last offset. Thanks to Herve
Werner for the report.
- Allow/improve incremental mode with multiple log file and multiprocess.
- Fix incorrect location of temporary file storing last parsed line in
multiprocess+incremental mode. Thanks to Herve Werner for the report.
- Fix remote ssh command error sh: 2: Syntax error: "|" unexpected.
Thanks to Herve Werner for the report.
- Fix missing database name in samples of top queries reports. Thanks to
Thomas Reiss for the report.
- Add minimal documentation about JSON output format.
- Add execute attribute to pgbadger in the source repository, some may
find this more helpful when pgbadger is not installed and executed
directly from this repository.
- Fix issue with csv log format and incremental mode. Thanks to Suya for
the report and the help to solve the issue. There is also a fix to support
autovacuum statistics with csv format.
- Fix bad URL to documentation. Thanks to Rodolphe Quiedeville for the report.
- Two minor changes to make it easier to use the Tsung scenario: remove the first
empty line and replace probability by weight. Now it is possible to use
the scenario as is with Tsung 1.5.
- Fix incremental mode where weeks on index page start on Sunday and week
reports start on Monday. Thanks to flopma and birkosan for the report.
- Replace label "More CPU costly" by "Highest CPU-cost". Thanks to Marc
Cousin for the suggestion.
- Add query latency percentile to General Activity table (percentiles are
90, 95, 99). Thanks to Himanchali for the patch.
- Fix typo in pgbadger call. Thanks to Guilhem Rambal for the report.
- Add JSON support for output format. JSON format is good for sharing data
with other languages, which makes it easy to integrate pgBadger's result
into other monitoring tools like Cacti or Graphite. Thanks to Shanzhang
Lan for the patch.
- Update documentation about remote mode feature.
- Update documentation to inform that the xz utility should be at least in
version 5.05 to support the --robot command line option. Thanks to Xavier
Millies-Lacroix for the report.
- Fix remote logfile parsing. Thanks to Herve Werner for the report.
2014-05-05 version 5.1-1
- Fix parsing of remote log file, forgot to apply some patches.
Thanks to Herve Werner for the report.
2014-05-04 version 5.1
This new release fixes several issues and adds several new features like:
* Support for named PREPARE and EXECUTE queries. They are replaced by
the real prepared statement and reported into top queries.
* Add new --exclude-line command line option for immediately excluding
log entries matching any regex.
* Included remote and client information into the most frequent events.
* pgBadger is now able to parse remote logfiles using a passwordless
ssh connection and generate the reports locally.
* Histogram granularity can be adjusted using the -A command line option.
* Add new detail information on top queries to show when the query is a
bind query.
* Support for logfiles compressed using the xz compression format.
* Change week/day menu in incremental index, it is now represented as
usual with a calendar view per month.
* Fix various compatibility issues with Windows and Perl 5.8
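For example (the ssh URI form and the granularity value are assumptions; check the documentation of your version for the exact remote-file syntax), parsing a remote log with a 10-minute histogram granularity might look like:
    pgbadger -A 10 -o report.html ssh://postgres@dbserver/var/log/postgresql/postgresql.log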
Here is the full list of changes:
- fixed calendar display and correct typo. Thanks to brunomgalmeida
for the patch.
- revert to single thread if file is small. Thanks to brunomgalmeida
for the patch.
- print calendars 4+4+4 instead of 3+4+4+1 when looking at full year.
Thanks to brunomgalmeida for the patch.
- Add --exclude-line option for excluding log entries with a regex based
on the full log line. Thanks to ferfebles for the feature request.
- Fix SQL keywords that were beautified twice.
- Remove duplicate pg_keyword in SQL beautifier.
- Fix increment of session when --disable-session is activated.
- Fix missing unit in Checkpoints Activity report when time value is
empty. Thanks to Herve Werner for the report.
- Fix double information in histogram data when period is the hour.
- Add support to named PREPARE and EXECUTE queries. Calls to EXECUTE
statements are now replaced by the prepared query and show samples
with parameters. Thanks to Brian DeRocher for the feature request.
- Included Remote and Client information into the most frequent events
examples. Thanks to brunomgalmeida for the patch.
- Fix documentation about various awkward phrasings, grammar, and
spelling. Consistently capitalize "pgBadger" as such, except for
command examples which should stay all-lowercase. Thanks to Josh
Kupershmidt for the patch.
- Fix incremental mode on Windows by replacing %F and %u POSIX::strftime
format to %Y-%m-%d and %w. Thanks to dthiery for the report.
- Remove Examples button when there is no examples available.
- Fix label on tips in histogram of errors reports.
- Fix error details in incremental mode in Most Frequent Errors/Events
report. Thanks to Herve Werner for the report.
- Fix Sync time value in Checkpoints buffers report. Thanks to Herve
Werner for the report.
- Fix wrong connections per host count. Thanks to Herve Werner for the
report.
- Allow pgBadger to parse remote log file using a passwordless ssh
connection. Thanks to Orange OLPS department for the feature request.
- Histogram granularity can be adjusted using the -A command line
option. By default they will report the mean of each top queries or
errors occurring per hour. You can now specify the granularity down to
the minute. Thanks to Orange OLPS department for the feature request.
- Add new detail information on top queries to show when the query is
a bind query. Thanks to Orange OLPS department for the feature request.
- Fix queries that exceed the size of the container.
- Add unit (seconds) to checkpoint write/sync time in the checkpoints
activity report. Thanks to Orange OLPS department for the report.
- Fix missing -J option in usage.
- Fix incomplete lines in split logfile to rewind to the beginning of
the line. Thanks to brunomgalmeida for the patch.
- Fix tsung output and add tsung xml header sample to output file.
- Make it possible to do --sample 0 (prior it was falling back to the
default of 3). Thanks to William Moran for the patch.
- Fix xz command to be script readable and always have size in bytes:
xz --robot -l %f | grep totals | awk "{print $5}"
- Add support to logfile compressed by the xz compression format.
Thanks to Adrien Nayrat for the patch.
- Do not increment queries duration histogram when prepare|parse|bind
log are found, but only with execute log. Thanks to Josh Berkus for
the report.
- Fix normalization of error message about unique violation when
creating intermediate dirs. Thanks to Tim Sampson for the report.
- Allow use of Perl metacharacters like [..] in application name.
Thanks to Magnus Persson for the report.
- Fix dataset tip to be displayed above image control button. Thanks
to Ronan Dunklau for the fix.
- Renamed the Reset button to "To Chart" to avoid confusion with unzoom
feature.
- Fix writing of empty incremental last parsed file.
- Fix several other graphs
- Fix additional message at end of query or error when it was logged
from application output. Thanks to Herve Werner for the report.
- Fix checkpoint and vacuum graphs when all datasets do not have all
values. Thanks to Herve Werner for the report.
- Fix week numbered -1 in calendar view.
- Change week/day menu in incremental index, it is now represented as
usual with a calendar view per month. Thanks to Thom Brown for the
feature request.
- Load FileHandle to fix error: Can not locate object method "seek"
via package "IO::Handle" with perl 5.8. Thanks to hkrizek for the
report.
- Fix count of queries in progress bar when there is compressed file
and multiprocess is enabled. Thanks to Johnny Tan for the report.
- Fix debug message "Start parsing at offset"
- Add ordering in queries times histogram. Thanks to Ulf Renman for
the report.
- Fix various typos. Thanks to Thom Brown for the patch.
- Fix Makefile error, "WriteMakefile: Need even number of args at
Makefile.PL" with Perl 5.8. Thanks to Fangr Zhang for the report.
- Fix some typos in Changelog
2014-02-05 version 5.0
This new major release adds some new features like incremental mode and SQL
query times histogram. There is also an hourly graphic representation of the
count and average duration of top normalized queries. Same for errors or events:
you will be able to see graphically at which hours they occur the most
often.
The incremental mode is an old request issued at PgCon Ottawa 2012 that concerns
the ability to construct incremental reports with successive runs of pgBadger.
It is now possible to run pgbadger each day or even more often, each hour, and have
cumulative reports per day and per week. A top index page allows you to go
directly to the weekly and daily reports.
This mode has been built with simplicity in mind, so running pgbadger by cron
as follows:
0 23 * * * pgbadger -q -I -O /var/www/pgbadger/ /var/log/postgresql.log
is enough to have daily and weekly reports viewable using your browser.
You can take a look at a sample report at http://dalibo.github.io/pgbadger/demov5/index.html
There's also a useful improvement to allow pgBadger to seek directly to the
last position in the same log file after a successive execution. This feature
is only available using the incremental mode or the -l option and parsing a
single log file. Let's say you have a weekly rotated log file and want to run
pgBadger each day. With 2GB of log per day, pgbadger was spending 5 minutes
per block of 2 GB to reach the last position in the log, so at the end of the
week this feature will save you 35 minutes. Now pgBadger will start parsing
new log entries immediately. This feature is compatible with the multiprocess
mode using -j option (n processes for one log file).
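Outside of incremental mode, this seek relies on the last-parsed state file given with -l (--last-parsed); a sketch with an illustrative state file path:
    pgbadger -l /var/run/pgbadger/last_parsed.tmp -o report.html /var/log/postgresql/postgresql.log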
Histogram of query times is a new report in top queries slide that shows the
query times distribution during the analyzed period. For example:
Range            Count        Percentage
----------------------------------------
0-1ms            10,367,313   53.52%
1-5ms               799,883    4.13%
5-10ms              451,646    2.33%
10-25ms           2,965,883   15.31%
25-50ms           4,510,258   23.28%
50-100ms            180,975    0.93%
100-500ms            87,613    0.45%
500-1000ms            5,856    0.03%
1000-10000ms          2,697    0.01%
> 10000ms                74    0.00%
There are also some graphic and report improvements, like the mouse tracker
formatting that has been reviewed. It now shows a vertical crosshair and
all dataset values at a time when the mouse pointer moves over a series. Automatic
query formatting has also been changed: it is now done on a double-click
event, as a simple click was painful when you want to copy some part of the
queries.
The report "Simultaneous Connections" has been relabeled into "Established
Connections", it is less confusing as many people think that this is the number
of simultaneous sessions, which is not the case. It only count the number of
connections established at same time.
Autovacuum reports now associate the database name with the autovacuum and autoanalyze
entries. Statistics now refer to "dbname.schema.table"; previous versions were only
showing the pair "schema.table".
This release also adds Session peak information and a report about Simultaneous
sessions. Parameters log_connections and log_disconnections must be enabled in
postgresql.conf.
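A minimal postgresql.conf sketch enabling both settings:
    log_connections = on
    log_disconnections = on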
Complete ChangeLog:
- Fix size of SQL queries columns to prevent exceeding screen width.
- Add new histogram reports on top normalized queries and top errors
or event. It shows at what hours and in which quantity the queries
or errors appears.
- Add seeking to last parser position in log file in incremental mode.
This prevents parsing the whole file to find the last line parsed in the
previous run. This only works when parsing a single flat file; the -j
option is permitted. Thanks to ioguix for the kick.
- Rewrite reloading of last log time from binary files.
- Fix missing statistics of last parsed queries in incremental mode.
- Fix bug in incremental mode that prevented reindexing a previous day.
Thanks to Martin Prochazka for the great help.
- Fix missing label "Avg duration" on column header in details of Most
frequent queries (N).
- Add vertical crosshair on graph.
- Fix case where queries and events were not updated when using -b and
-e command line options. Thanks to Nicolas Thauvin for the report.
- Fix week sorting on incremental report main index page. Thanks to
Martin Prochazka for the report.
- Add "Histogram of query times" report to show statistics like
0-100ms : 80%, 100-500ms :14%, 500-1000ms : 3%, > 1000ms : 1%.
Thanks to tmihail for the feature request.
- Format mouse tracker on graphs to show all dataset value at a time.
- Add control of -o vs -O option with incremental mode to prevent
wrong use.
- Change log level of missing LAST_PARSED.tmp file to WARNING and
add a HINT.
- Update copyright date to 2014
- Fix empty reports of connections. Thanks to Reeshna Ramakrishnan
for the report.
- Fix display of connections peak when no connection was reported.
- Fix warning on META_MERGE for ExtUtils::MakeMaker < 6.46. Thanks
to Julien Rouhaud for the patch.
- Add documentation about automatic incremental mode.
- Add incremental mode to pgBadger. This mode will build a report
per day and a cumulative report per week. It also creates an index
interface for easier access to the different reports. Must be run,
for example, as:
pgbadger /var/log/postgresql.log.1 -I -O /var/www/pgbadger/
after a daily PostgreSQL log file rotation.
- Add -O | --outdir path to specify the directory where output files
must be saved.
- Automatic query formatting is now done on a double-click event, as a
simple click was painful when you want to copy some part of the
queries. Thanks to Guillaume Smet for the feature request.
- Remove calls of binmode to force html file output to be utf8 as
there are some bad side effects. Thanks to akorotkov for the report.
- Remove use of the Time::HiRes Perl module as some distributions do
not include this module by default in the core Perl install.
- Fix "Wide character in print" Perl message by setting binmode
to :utf8. Thanks to Casey Allen Shobe for the report.
- Fix application name search regex to handle application name with
space like "pgAdmin III - Query Tool".
- Fix wrong timestamps saved with top queries. Thanks to Herve Werner
for the report.
- Fix missing log type statistics when using binary mode. Thanks to
Herve Werner for the report.
- Fix Queries by application table column header: Database replaced
by Application. Thanks to Herve Werner for the report.
- Add "Max number of times the same event was reported" report in
Global stats Events tab.
- Replace "Number of errors" by "Number of ERROR entries" and add
"Number of FATAL entries".
- Replace "Number of errors" by "Number of events" and "Total errors
found" by "Total events found" in Events reports. Thanks to Herve
Werner for the report.
- Fix title error in Sessions per database.
- Fix clicking on the info link to not go back to the top of the page.
Thanks to Guillaume Smet for the report and solution.
- Fix incremental report from binary output where binary data was not
loaded if no queries were present in log file. Thanks to Herve Werner
for the report.
- Fix parsing issue when log_error_verbosity = verbose. Thanks to vorko
for the report.
- Add Session peak information and a report about Simultaneous sessions.
log_connections+log_disconnections must be enabled in postgresql.conf.
- Fix wrong requests number in Queries by user and by host. Thanks to
Jehan-Guillaume de Rorthais for the report.
- Fix issue with rsyslog format failing to parse logs. Thanks to Tim
Sampson for the report.
- Associate autovacuum and autoanalyze log entry to the corresponding
database name. Thanks to Herve Werner for the feature request.
- Change "Simultaneous Connections" label into "Established Connections",
it is less confusing as many people think that this is the number of
simultaneous sessions, which is not the case. It only count the number
of connections established at same time. Thanks to Ronan Dunklau for
the report.
2013-11-08 version 4.1
This release fixes two major bugs and some other minor issues. There's also a
new command line option --exclude-appname that allows exclusion from the report
of queries generated by a specific program, like pg_dump. Documentation has
been updated with a new chapter about building incremental reports.
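For instance, excluding all traffic generated by pg_dump could look like this (a sketch; the application name must match what is logged, e.g. via %a in log_line_prefix):
    pgbadger --exclude-appname "pg_dump" postgresql.log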
- Add log_autovacuum_min_duration into documentation in chapter about
postgresql configuration directives. Thanks to Herve Werner for the
report.
- Add chapter about "Incremental reports" into documentation.
- Fix reports with per-minute averages where the last time fraction was
not reported. Thanks to Ludovic Levesque and Vincent Laborie for the
report.
- Fix unterminated comment in information popup. Thanks to Ronan
Dunklau for the patch.
- Add --exclude-appname command line option to eliminate unwanted
traffic generated by a specific application. Thanks to Steve Crawford
for the feature request.
- Allow use of external links in the URL to go to a specific report. Thanks
to Hubert depesz Lubaczewski for the feature request.
- Fix empty reports when parsing compressed files with the -j option
which is not allowed with compressed file. Thanks to Vincent Laborie
for the report.
- Prevent progress bar length from increasing past 100% when the real size is
greater than the estimated size (issue found with a huge compressed file).
- Correct some spelling and grammar in ChangeLog and pgbadger. Thanks
to Thom Brown for the patch.
- Fix major bug on SQL traffic reports with wrong min value and bad
average value on select reports, add min/max for select queries.
Thanks to Vincent Laborie for the report.
2013-10-31 - Version 4.0
This major release is the "Say goodbye to the fouine" release. With a full
rewrite of the reports design, pgBadger has now turned the HTML reports into
a more intuitive user experience and professional look.
The report is now driven by a dynamic menu with the help of the embedded
Bootstrap library. Every main menu corresponds to a hidden slide that is
revealed when the menu or one of its submenus is activated. There's
also the embedded font Font Awesome webfont to beautify the report.
Every statistics report now includes a key value section that immediately
shows you some of the relevant information. Pie charts have also been
separated from their data tables using two tabs, one for the chart and the
other one for the data.
Tables reporting hourly statistics have been moved to a multiple tabs report
following the data. This is used with General (queries, connections, sessions),
Checkpoints (buffer, files, warnings), Temporary files and Vacuums activities.
There's some new useful information shown in the key value sections. Peak
information shows the number and datetime of the highest activity. Here is the
list of those reports:
- Queries peak
- Read queries peak
- Write queries peak
- Connections peak
- Checkpoints peak
- WAL files usage Peak
- Checkpoints warnings peak
- Temporary file size peak
- Temporary file number peak
Reports about Checkpoints and Restartpoints have been merged into a single report.
These are almost one and the same event, except that restartpoints occur on a slave
cluster, so there was no need to distinguish between the two.
Recent PostgreSQL versions add additional information about checkpoints, the
number of synced files, the longest sync and the average of sync time per file.
pgBadger collects and shows this information in the Checkpoint Activity report.
There's also some new reports:
- Prepared queries ratio (execute vs prepare)
- Prepared over normal queries
- Queries (select, insert, update, delete) per user/host/application
- Pie charts for tables with the most tuples and pages removed during vacuum.
The vacuum report will now highlight the costly tables during a vacuum or
analyze of a database.
The errors are now highlighted by a different color following the level.
A LOG level will be green, HINT will be yellow, WARNING orange, ERROR red
and FATAL dark red.
Some changes in the binary format are not backward compatible and the option
--client has been removed as it has been superseded by --dbclient for a long time now.
If you are running a pg_dump or some batch process with very slow queries, your
report analysis will be hindered by those queries having unwanted prominence in the
report. Before this release it was a pain to exclude those queries from the
report. Now you can use the --exclude-time command line option to exclude all
traces matching the given time regexp from the report. For example, let's say
you have a pg_dump at 13:00 each day lasting half an hour; you can use pgbadger
as follows:
pgbadger --exclude-time "2013-09-.* 13:.*" postgresql.log
If you are also running a pg_dump at night, let's say 22:00, you can write it
as follows:
pgbadger --exclude-time '2013-09-\d+ 13:[0-3]' --exclude-time '2013-09-\d+ 22:[0-3]' postgresql.log
or more concisely:
pgbadger --exclude-time '2013-09-\d+ (13|22):[0-3]' postgresql.log
Exclude time always requires the iso notation yyyy-mm-dd hh:mm:ss, even if log
format is syslog. This is the same for all time-related options. Use this option
with care as it has a high cost on the parser performance.
2013-09-17 - version 3.6
Still another version in the 3.x branch to fix two major bugs in vacuum and checkpoint
graphs. Some other minor bugs have also been fixed.
- Fix grammar in --quiet usage. Thanks to stephen-a-ingram for the report.
- Fix reporting period to start after the last --last-parsed value instead
of the first log line. Thanks to Keith Fiske for the report.
- Add --csv-separator command line usage to documentation.
- Fix CSV log parser and add --csv-separator command line option to allow
changing the default csv field separator, comma, to any other character.
- Avoid "negative look behind not implemented" errors on perl 5.16/5.18.
Thanks to Marco Baringer for the patch.
- Support timestamps for begin/end with fractional seconds (so it'll handle
postgresql's normal string representation of timestamps).
- When using negative look behind, set sub-regexp to -i (not case insensitive)
to avoid issues with some upper case letter sequences, like SS or ST.
- Change shebang from /usr/bin/perl to /usr/bin/env perl so that user-local
(perlbrew) perls will get used.
- Fix empty graph of autovacuum and autoanalyze.
- Fix checkpoint graphs that were not displayed anymore.
2013-07-11 - Version 3.5
Last release of the 3.x branch, this is a bug fix release that also adds some
pretty printing of Y axis numbers on graphs and a new graph that groups query
duration series that were shown as a second Y axis on graphs, as well as a new
graph with the number of temporary files that was also used as a second Y axis.
- Split temporary files report into two graphs (file size and number
of files) to no longer use a second Y axis with flotr2 - the mouse tracker
is not working as expected.
- Duration series representing the second Y axis in queries graph have
been removed and are now drawn in a new "Average queries duration"
independent graph.
- Add pretty print of numbers in Y axis and mouse tracker output with
PB, TB, GB, KB, B units, and seconds, microseconds. Numbers without
unit are shown with P, T, M, K suffixes for easier reading of very long
numbers.
- Remove Query type reports when log only contains duration.
- Fix display of checkpoint hourly report with no entry.
- Fix count in Query type report.
- Fix minimal statistics output when nothing was loaded from the log file.
Thanks to Herve Werner for the report.
- Fix several bugs in log line parser. Thanks to Den Untevskiy for the
report.
- Fix bug in last parsed storage when log files were not provided in the
right order. Thanks to Herve Werner for the report.
- Fix orphan lines wrongly associated to previous queries instead of
temporary file and lock logged statement. Thanks to Den Untevskiy for
the report.
- Fix number of different samples shown in events report.
- Escape HTML tags on error messages examples. Thanks to Mael Rimbault
for the report.
- Remove some temporary debug information used with some LOG messages
reported as events.
- Fix several issues with restartpoint and temporary files reports.
Thanks to Guillaume Lelarge for the report.
- Fix issue when an absolute path was given to the incremental file.
Thanks to Herve Werner for the report.
- Remove creation of incremental temp file $tmp_last_parsed when not
running in multiprocess mode. Thanks to Herve Werner for the report.
2013-06-18 - Version 3.4
This release adds lots of graphic improvements and better rendering of logs
spanning only a few hours. There are also some bug fixes, especially on the report
of queries that generate the most temporary files.
- Update flotr2.min.js to latest github code.
- Add mouse tracking over y2axis.
- Add label/legend information to ticks displayed on mouseover graphs.
- Fix documentation about log_statement and log_min_duration_statement.
Thanks to Herve Werner for the report.
- Fix missing top queries for locks and temporary files in multiprocess
mode.
- Cleanup code to remove storage of unused information about connection.
- Divide the huge dump_as_html() method with one method per each report.
- Checkpoints, restart points and temporary files are now drawn using a
period of 5 minutes by default instead of one hour. Thanks to Josh
Berkus for the feature request.
- Change fixed increment of one hour to five minutes on queries graphs
"SELECT queries" and "Write queries". Remove graph "All queries" as,
with a five minutes increment, it duplicates the "Queries per second".
Thanks to Josh Berkus for the feature request.
- Fix typos. Thanks to Arsen Stasic for the patch.
- Add default HTML charset to utf-8 and a command line option --charset
to be able to change the default. Thanks to thomas hankeuhh for the
feature request.
- Fix missing temporary files query reports in some conditions. Thanks
to Guillaume Lelarge and Thomas Reiss for the report.
- Fix some parsing issues with logs generated by pg 7.4.
- Update documentation about missing new reports introduced in previous
version 3.3.
Note that it should be the last release of the 3.x branch unless there are major
bug fixes, but the next one will be a major release with a completely new design.
2013-05-01 - Version 3.3
This release adds four more useful reports about queries that generate locks and
temporary files, another new report about restart points on slaves, and several
bug fixes or cosmetic changes. Support for parallel processing under Windows OS has
been removed.
- Remove parallel processing under the Windows platform, the use of waitpid
is freezing pgbadger. Thanks to Saurabh Agrawal for the report. I'm
not comfortable with that OS, which is why support has been removed;
if someone knows how to fix that, please submit a patch.
- Fix Error in tempfile() under Windows. Thanks to Saurabh Agrawal for
the report.
- Fix wrong queries storage with lock and temporary file reports. Thanks
to Thomas Reiss for the report.
- Add samples queries to "Most frequent waiting queries" and "Queries
generating the most temporary files" report.
- Add two more reports about locks: 'Most frequent waiting queries (N)",
and "Queries that waited the most". Thanks to Thomas Reiss for the
patch.
- Add two reports about temporary files: "Queries generating the most
temporary files (N)" and "Queries generating the largest temporary
files". Thanks to Thomas Reiss for the patch.
- Cosmetic change to the Min/Max/Avg duration columns.
- Fix report of samples error with csvlog format. Thanks to tpoindessous
for the report.
- Add --disable-autovacuum to the documentation. Thanks to tpoindessous
for the report.
- Fix unmatched ) in regex when using %s in prefix.
- Fix bad average size of temporary file in Overall statistics report.
Thanks to Jehan Guillaume de Rorthais for the report.
- Add restartpoint reporting. Thanks to Guillaume Lelarge for the patch.
- Made some minor changes in CSS.
- Replace %% in log line prefix internally by a single % so that it
could be exactly the same as in log_line_prefix. Thanks to Cal
Heldenbrand for the report.
- Fix perl documentation header, thanks to Cyril Bouthors for the patch.
2013-04-07 - Version 3.2
This is mostly a bug fix release; it also adds escaping of HTML code inside
queries and adds Min/Max reports with average duration in all queries
reports.
- In multiprocess mode, fix case where pgbadger does not update
the last-parsed file and does not take care of the previous run.
Thanks to Kong Man for the report.
- Fix case where pgbadger does not update the last-parsed file.
Thanks to Kong Man for the report.
- Add CDATA to make validator happy. Thanks to Euler Taveira de
Oliveira for the patch.
- Some code review by Euler Taveira de Oliveira, thanks for the
patch.
- Fix case where stat were multiplied by N when -J was set to N.
Thanks to thegnorf for the report.
- Add a line in documentation about log_statement that disables
log_min_duration_statement when it is set to all.
- Add quick note on how to contribute, thanks to Damien Clochard
for the patch.
- Fix issue with logs read from stdin. Thanks to hubert depesz
lubaczewski for the report.
- Force pgbadger to not try to beautify queries bigger than 10kb, as
this takes too much time. This value can be reduced in the
future if hangs with long queries still happen. Thanks to John
Rouillard for the report.
- Fix another issue in replacing bind param when the bind value
is alone on a single line. Thanks to Kjeld Peters for the report.
- Fix parsing of compressed files together with uncompressed files
using the -j option. Uncompressed files are now processed using the
split method and compressed ones are each parsed by one dedicated process.
- Replace zcat by gunzip -c to fix an issue on Mac OS X. Thanks to
Kjeld Peters for the report.
- Escape HTML code inside queries. Thanks to denstark for the report.
- Add Min/Max in addition to Average duration values in queries reports.
Thanks to John Rouillard for the feature request.
- Fix top slowest array size with binary format.
- Fix another case with bind parameters with the value on the next line and
the top N slowest queries that were repeated up to N even if the real
number of queries was lower. Thanks to Kjeld Peters for the reports.
- Fix non-replacement of bind parameters where there are line breaks in
the parameters, aka multiline bind parameters. Thanks to Kjeld Peters
for the report.
- Fix error with seekable export tag with Perl v5.8. Thanks to Jeff Bohmer
for the report.
- Fix parsing of non-standard syslog lines beginning with a timestamp like
"2013-02-28T10:35:11-05:00". Thanks to Ryan P. Kelly for the report.
- Fix issue #65 where using -c | --dbclient with csvlog was broken. Thanks
to Jaime Casanova for the report.
- Fix empty report in watchlog mode (-w option).
2013-02-21 - Version 3.1
This is a quick release to fix missing reports of most frequent errors and slowest
normalized queries in the previous version published yesterday.
- Fix empty report in watchlog mode (-w option).
- Force immediate die on command line options error.
- Fix missing report of most frequent events/errors report. Thanks to
Vincent Laborie for the report.
- Fix missing report of slowest normalized queries. Thanks to Vincent
Laborie for the report.
- Fix display of last print of progress bar when quiet mode is enabled.
2013-02-20 - Version 3.0
This new major release adds parallel log processing by using as many cores as
wanted to parse log files; the performance gain is directly related to the
number of cores specified. There are also new reports about autovacuum/autoanalyze
information and many bugs have been fixed.
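As a hedged sketch (core counts and paths are illustrative), parsing one large log file with four cores, or many small files with one process per file, could be invoked like:
    pgbadger -j 4 /var/log/postgresql/postgresql.log
    pgbadger -J 4 /var/log/postgresql/postgresql-*.log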
- Update documentation about log_duration, log_min_duration_statement
and log_statement.
- Rewrite dirty code around log timestamp comparison to find timestamp
of the specified begin or ending date.
- Remove distinction between logs with duration enabled from variables
log_min_duration_statement and log_duration. Commands line options
--enable-log_duration and --enable-log_min_duration have been removed.
- Update documentation about parallel processing.
- Remove usage of Storable::file_magic to autodetect binary format file,
it is not included in core Perl 5.8. Thanks to Marc Cousin for the
report.
- Force multiprocess per file when files are compressed. Thanks to
Julien Rouhaud for the report.
- Add progress bar logger for multiprocess by forking a dedicated
process and using pipe. Also fix some bugs in using binary format
that duplicate query/error samples per process.
- chmod 755 pgbadger
- Fix checkpoint reports when there is no checkpoint warnings.
- Fix non-report of hourly connections/checkpoint/autovacuum when no
query is found in the log file. Thanks to Guillaume Lelarge for the
report.
- Add better handling of signals in multiprocess mode.
- Add -J|--job_per_file command line option to force pgbadger to use
one process per file instead of using all of them to parse one file. Useful
to get better performance with lots of small log files.
- Fix parsing of orphan lines with stderr logs and log_line_prefix
without session information into the prefix (%l).
- Update documentation about -j | --jobs option.
- Allow pgbadger to use several cores, aka multiprocessing. Add the
-j | --jobs option to specify the number of cores to use.
- Add autovacuum and autoanalyze infos to binary format.
- Fix case in SQL code highlighting where QQCODE temp keyword was not
replaced. Thanks to Julien Rouhaud for the report.
- Fix CSS to draw autovacuum graph and change legend opacity.
- Add pie graph to show the distribution of the number of autovacuums per table
and the number of tuples removed by autovacuum per table.
- Add debug information about selected type of log duration format.
- Add report of tuples/pages removed in report of Vacuums by table.
- Fix major bug in syslog parser where the year part of the date was
wrongly extracted from current date with logs generated in 2012.
- Fix issue with Perl 5.16 that does not allow "ss" inside look-behind
assertions. Thanks to Cedric for the report.
- New vacuum and analyze hourly reports and graphs. Thanks to Guillaume
Lelarge for the patch.
UPGRADE: if you are running pgbadger by cron, take care if you were using one of
the following options: --enable-log_min_duration and --enable-log_duration; they
have been removed and pgbadger will refuse to start.
2013-01-17 - Version 2.3
This release fixes several major issues, especially with csvlog, and a memory leak
with log parsing using a start date. There are also several improvements, like new
reports of the number of queries by database and application. Mousing over reported
queries will show the database, user, remote client and application name where they
are executed.
A new binary input/output format has been introduced to allow saving or reading
precomputed statistics. This will allow incremental reports based on periodic
runs of pgbadger. This is a work in progress, fully available with the next
major release.
Several SQL code beautifier improvements from pgFormatter have also been merged.
- Clarify misleading statement about log_duration: log_duration may be
turned on depending on desired information. Only log_statement must
not be on. Thanks to Matt Romaine for the patch.
- Fix --dbname and --dbuser not working with csvlog format. Thanks to
Luke Cyca for the report.
- Fix issue in SQL formatting that prevented left back indentation when
major keywords were found. Thanks to Kevin Brannen for the report.
- Display 3 decimals in time report so that ms can be seen. Thanks to
Adam Schroder for the request.
- Force the parser to not insert a new line after the SET keyword when
the query begin with it. This is to preserve the single line with
queries like SET client_encoding TO "utf8";
- Add better SQL formatting of update queries by adding a new line
after the SET keyword. Thanks to pilat66 for the report.
- Update copyright and documentation.
- Queries without application name are now stored under the "others"
application name.
- Add report of number of queries by application if %a is specified in
the log_line_prefix.
- Add link menu to the request per database and limit the display of
this information when there is more than one database.
- Add report of requests per database.
- Add report of user,remote client and application name to all request
info.
- Fix memory leak with option -b (--begin) and in incremental log
parsing mode.
- Remove duration part from log format auto-detection. Thanks to
Guillaume Lelarge for the report.
- Fix a performance issue in prettifying SQL queries that made pgBadger
several times slower than usual to generate the HTML output. Thanks to
Vincent Laborie for the report.
- Add missing SQL::Beautify paternity.
- Add 'binary' format as input/output format. The binary output format
allows to save log statistics in a non human readable file instead of
an HTML or text file. These binary files might then be used as regular
input files, combined or not, to produce a html or txt report. Thanks
to Jehan Guillaume de Rorthais for the patch.
- Remove port from the session regex pattern to match all lines.
- Fix the progress bar. It was trying to use gunzip to get real file
size for all formats (by default). Unbreak the bz2 format (that does
not report real size) and add support for zip format. Thanks to Euler
Taveira de Oliveira for the patch.
- Fix some typos and grammatical issues. Thanks to Euler Taveira de
Oliveira for the patch.
- Improve SQL code highlighting and keywords detection merging change
from pgFormatter project.
- Add support to hostname or ip address in the client detection. Thanks
to stuntmunkee for the report.
- pgbadger will now only report execute statements of the extended
protocol (parse/bind/execute). Thanks to pierrestroh for the report.
- Fix numerous typos as well as formatting and grammatical issues.
Thanks to Thom Brown for the patch.
- Add backward compatibility to obsolete --client command line option.
If you were using the short option -c nothing is changed.
- Fix issue with --dbclient and %h in log_line_prefix. Thanks to Julien
Rouhaud for the patch.
- Fix multiline progress bar output.
- Allow usage of a dash in database, user and application names when
a prefix is used. Thanks to Vipul for the report.
- Mousing over queries will now show in which database they were executed
in the overviews (Slowest queries, Most frequent queries, etc.).
Thanks to Dirk-Jan Bulsink for the feature request.
- Fix missing keys on %cur_info hash. Thanks to Marc Cousin for the
report.
- Move opening file handle to log file into a dedicated function.
Thanks to Marc Cousin for the patch.
- Replace Ctrl+M by printable \r. Thanks to Marc Cousin for the report.
2012-11-13 - Version 2.2
This release adds some major features like tsung output, speed improvements with
csvlog, a report of shutdown events, and new command line options to generate
reports excluding some user(s), to build reports based on select queries only,
to specify regexes so that only matching queries are included in the report,
and to remove comments from queries. Lots of bug fixes, please upgrade.
- Update PostgreSQL keywords list for 9.2
- Fix number of queries in progress bar with tsung output.
- Remove obsolete syslog-ng and temporary syslog-ll log format added to
fix some syslog autodetection issues. There is now just one syslog
format: syslog, differences between syslog formats are detected and
the log parser is adaptive.
- Add comment about the check_incremental_position() method
- Fix reports with empty graphs when log files were not in chronological
order.
- Add report of current total of queries and events parsed in progress
bar. Thanks to Jehan-Guillaume de Rorthais for the patch.
- Force pgBadger to use and require the XS version of Text::CSV instead
of the Pure Perl implementation. It is a good bit faster. Thanks to
David Fetter for the patch. Note that using csvlog is still a bit
slower than syslog or stderr log format.
- Fix several issues with tsung output.
- Add report of shut down events
- Add debug information on the command line used to pipe a compressed log
file when -v is provided.
- Add -U | --exclude-user command line option to generate reports
excluding the given user. Thanks to Birta Levente for the feature request.
- Allow some options to be specified multiple times or written as a
comma separated list of values, here are these options: --dbname,
--dbuser, --dbclient, --dbappname, --exclude_user.
- Add -S | --select-only option to build report only on select queries.
- Add first support to tsung output, see usage. Thanks to Guillaume
Lelarge for the feature request.
- Add --include-query and --include-file to specify regex of the queries
that must only be included in the report. Thanks to Marc Cousin for
the feature request.
- Fix auto detection of log_duration and log_min_duration_statement
format.
- Fix parser issue with Windows logs without timezone information.
Thanks to Nicolas Thauvin for the report.
- Fix bug in %r = remote host and port log line prefix detection.
Thanks to Hubert Depesz Lubaczewski for the report.
- Add -C | --nocomment option to remove comment like /* ... */ from
queries. Thanks to Hubert Depesz Lubaczewski for the feature request.
- Fix escaping of log_line_prefix. Thanks to Hubert Depesz Lubaczewski
for the patch.
- Fix wrong detection of update queries when a query has object names
containing "update" and "set". Thanks to Vincent Laborie for the report.
2012-10-10 - Version 2.1
This release adds a major feature by allowing any custom log_line_prefix to be
used by pgBadger. With stderr output you at least need to log the timestamp (%t),
the pid (%p) and the session/line number (%l). It also adds support for
log_duration instead of log_min_duration_statement to allow reports simply based
on duration and count, without query details. Lots of bug fixes, please upgrade
asap.
- Add new --enable-log_min_duration option to force pgbadger to use lines
generated by the log_min_duration_statement even if the log_duration
format is autodetected. Useful if you use both but do not log all queries.
Thanks to Vincent Laborie for the feature request.
- Add syslog-ng format to better handle syslog traces with notation like:
[ID * local2.info]. It is autodetected but can be forced in the -f option
with value set to: syslog-ng.
- Add --enable-log_duration command line option to force pgbadger to only
use the log_duration trace even if log_min_duration_statement traces are
autodetected.
- Fix display of empty hourly graph when no data were found.
- Remove query type report when log_duration is enabled.
- Fix a major bug in queries with bind parameters. Thanks to Marc Cousin for
the report.
- Fix detection of compressed log files and allow automatic detection
and uncompress of .gz, .bz2 and .zip files.
- Add gunzip -l command to find the real size of a gzip compressed file.
- Fix log_duration-only reports to not take query details into account,
just count and duration.
- Fix issue with compressed csvlog. Thanks to Philip Freeman for the
report.
- Allow usage of log_duration instead of log_min_duration_statement to
just collect statistics about the number of queries and their time.
Thanks to Vincent Laborie for the feature request.
- Fix issue on syslog format and autodetect with additional info like:
[ID * local2.info]. Thanks to kapsalar for the report.
- Removed unrecognized log line generated by deadlock_timeout.
- Add missing information about unsupported csv log input from stdin.
It must be read from a file. Thanks to Philip Freeman for the report.
- Fix issue #28: Illegal division by zero with log file without query
and txt output. Thanks to rlowe for the report.
- Update documentation about the -N | --appname option.
- Rename --name option into --appname. Thanks to Guillaume Lelarge for
the patch.
- Fix min/max values on the x-axis that always represented 2 days by
default. Thanks to Casey Allen Shobe for the report.
- Fix major bug when running pgbadger with the -e option. Thanks to
Casey Allen Shobe for the report and the great help.
- Change project url to http://dalibo.github.com/pgbadger/. Thanks to
Damien Clochard for this new hosting.
- Fix lot of issues in CSV parser and force locale to be C. Thanks to
Casey Allen Shobe for the reports.
- Improve speed with custom log_line_prefix.
- Merge pull request #26 from elementalvoid/helpdoc-fix
- Fixed help text for --exclude-file. Old help text indicated that the
option name was --exclude_file which was incorrect.
- Remove the obsolete --regex-user and --regex-db options that were used
to specify a search pattern in the log_line_prefix to find the user
and db name. This is replaced by the --prefix option.
- Replace Time column report header by Hour.
- Fix another issue in the log_line_prefix parser with stderr format.
- Add a more complex example using log_line_prefix.
- Fix log_line_prefix issue when using timestamps with milliseconds.
- Add support to use any custom log_line_prefix with new option -p or
--prefix. See README for an example.
- Fix false autodetection of CSV format when log_statement is enabled or
possibly in other cases. This was resulting in error: "FATAL: cannot
use CSV". Thanks to Thomas Reiss for the report.
- Fix display of empty graph of connections per second.
- Allow character : in log line prefix, it will no longer break log
parsing. Thanks to John Rouillard for the report.
- Add report of configuration parameter changes into the errors report
and rename the errors report to events report to handle important
messages that are not errors.
- Allow pgbadger to recognize "autovacuum launcher" messages.
2012-08-21 - version 2.0
This major version adds some changes not backward compatible with previous
versions. Options -p and -g are no longer used as the progress bar and graph
generation are now enabled by default.
The obsolete -l option used to specify the log file to parse has been reused to
specify an incremental file. Besides these changes and some bug fixes there are
also new features:
* Using an incremental file with the -l option allows parsing a single log
file multiple times and "seeking" to the last line parsed during the previous
run. Useful if you have a log rotation not in sync with your pgbadger run.
For example you can run something like this:
pgbadger `find /var/log/postgresql/ -name "postgresql*" -mtime -7 -type f` \
-o report_`date +%F`.html -l /var/run/pgbadger/last_run.log
* All queries displayed in the HTML report are now clickable to display or
hide a nicely formatted SQL query. This is called the SQL format beautifier.
* The CSV log parser has been entirely rewritten to handle csv with multiline.
Everyone should upgrade.
- Change license from BSD like to PostgreSQL license. Request from
Robert Treat.
- Fix wrong pointer on Connections per host menu. Reported by Jean-Paul
Argudo.
- Small fix for sql formatting adding scrollbars. Patch by Julien
Rouhaud.
- Add SQL format beautifier on SQL queries. When you will click on a
query it will be beautified. Patch by Gilles Darold
- The progress bar is now enabled by default, the -p option has been
removed. Use -q | --quiet to disable it. Patch by Gilles Darold.
- Graphs are now generated by default for HTML output, option -g has
been removed and option -G added to allow disabling graph generation.
Request from Julien Rouhaud, patch by Gilles Darold.
- Remove options -g and -p from the documentation. Patch by Gilles Darold.
- Fix case sensitivity in command line options. Patch by Julien Rouhaud.
- Add -T|--title option to change report title. Patch by Yury Bushmelev.
- Add new option --exclude-file to exclude specific commands with regex
stated in a file. This is a rewrite by Gilles Darold of the neoeahit
(Vipul) patch.
- The CSV log parser has been entirely rewritten to handle csv with
multi-line entries, it also adds approximate durations for csvlog. Reported
by Ludhimila Kendrick, patch by Gilles Darold.
- Alphabetical reordering of options list in method usage() and
documentation. Patch by Gilles Darold.
- Remove obsolete -l | --logfile command line option, the -l option
will be reused to specify an incremental file. Patch by Gilles Darold.
- Add -l | --last-parsed options to allow incremental run of pgbadger.
Patch by Gilles Darold.
- Replace call to timelocal_nocheck by timegm_nocheck, to convert date
time into seconds since the epoch. This should fix timezone issues.
Patch by Gilles Darold.
- Change regex on log parser to allow missing ending space in
log_line_prefix. This seems a common mistake. Patch by Gilles Darold.
- print warning when an empty log file is found. Patch by Gilles Darold.
- Add perltidy rc file to format pgbadger Perl code. Patch from depesz.
2012-07-15 - version 1.2
This version adds some reports and fixes a major issue in the log parser.
Everyone should upgrade.
- Rewrite this changelog to be human readable.
- Add -v | --verbose to enable debug mode. It is now disabled by default.
- Add hourly report of checkpoint warnings when checkpoints are occurring
too frequently; it will display the hourly count and the average
occurrence time.
- Add new report that sums the messages by log types. The report shows
the number of messages of each log type, and a percentage. It also
displays a pie graph. Patch by Guillaume Lelarge.
- Add missing pie graph on locks by type report.
- Format pie mouse track to display values only.
- Fix graph download button id on new connection graph.
- Add trackFormatter to flotr2 line graphs to show current x/y values.
- Fix issue on per minute minimum value.
- Add a note about Windows OS and zcat as well as a more general note
about using compressed log files in other formats than gzip.
- Complete rewrite of the log parser to handle unordered log lines.
Data are now stored by pid first and added to the global statistics
at the end. Error reports now include full details, statements, contexts
and hints when available. Deadlocks are also fully reported with the
queries concerned.
- Fix mishandling of multi-line queries on syslog.
- Add -a|--average option to configure the per-minute average interval
for queries and connections. If you want the average to be calculated
each minute instead of the default 5, use --average 1; for the
default use --average 5. If you want an average per hour set it to 60.
- Add hourly statistics of connections and sessions as well as a chart
about the number of connections per second (5 minute average).
- Allow OTHERS type of queries lower than 2% to be included in the sum of
types < 2%.
- Add autodetection of the syslog ident name if different from the default
"postgres" and there is just one ident name in the log.
- Remove syslog replacement of tabulations by #011 still visible when
there were multiple tabulations.
- Fix autodetection of log format syslog with single-digit day number
in date.
- Add ChangeLog to MANIFEST and change URI in html footer.
- Check pgBadger compatibility with Windows OSes. Runs perfectly.
2012-07-04 - version 1.1
This release fixes a lot of issues and adds several main features.
New feature:
- Add possibility to get log from stdin
- Change syslog parsing regex to allow log timestamp in log_line_prefix
very often forgotten when log destination is changed from stderr to
syslog.
- Add documentation for the -z | --zcat command line option.
- Allow `zcat` location to be specified via `--zcat` - David E. Wheeler
- Add --disable-session, --disable-connection and --disable-checkpoint
command line options to remove their respective reports from the
output
- Add --disable-query command line option to remove queries statistics
from the output
- Add --disable-hourly command line option to remove hourly statistics
from the output
- Add --disable-error command line option to remove error report from
the output
- Add --exclude-query option to exclude types of queries by specifying
a regex
- Set thousand separator and decimal separator to be locale dependent
- Add -w option to only report errors
- Add Makefile.PL and full POD documentation to the project
- Allow multiple log files from command line
- Add simple csvlog support - Alex Hunsaker
- Hourly reports for temporary files and checkpoints have moved to a
separate table.
- Add hourly connections and sessions statistics.
- Add a chart about the number of connections per second.
Bug fix:
- Add information about log format requirement (lc_message = 'C').
Reported by Alain Benard.
- Fix for begin/end dates with single digit day using syslog. Patch by
Joseph Marlin.
- Fix handle of syslog dates with single-digit day number. Patch by
Denis Orlikhin.
- Fix many English syntax in error messages and documentation. Patch by
Joseph Marlin.
- Fix non terminated TH html tag in checkpoint hourly table. Reported
by Joseph Marlin.
- "Log file" section will now only report first and last log file parsed
- Fix empty output in hourly temporary file stats.
- Fix wrapping queries that go out of the table and make the window
scroll horizontally. Asked by Isaac Reuben.
- Fix code where != was replaced by $$CLASSSY0A$$!=$$CLASSSY0B$$ in the
output. Reported by Isaac Reuben
- Fix and review text report output.
- Fix an issue in SQL code highlighting replacement.
- Complete review of the HTML output.
- Add .gitignore for swap files. Patch by Vincent Picavet
- Fix wrong variable for user and database filter. Patch by Vincent
Picavet.
- Change default regexp for user and db to be able to detect both. Patch
by Vincent Picavet.
- Fix false cur_date when using syslog and allow -b and -e options to
work. Patch by Vincent Picavet.
- Fix some cases where logs were not detected as PostgreSQL log lines.
- Added explanation for --begin and --end datetime setting. Patch by
ragged.
- Added -v / --version. Patch by ragged.
- Fix usage information and presentation in README file.
2012-05-04 - version 1.0
First public release of pgBadger.
New feature:
- Add graph of checkpoint WAL files usage (added, removed, recycled).
- Add --image-format to allow the change of the default png image
format to jpeg.
- Allow download of all pie graphics as images.
- Add --pie-limit to sum all data lower than this percentage limit to
avoid label overlap.
- Allow download of graphics as PNG images.
- Replace GD::Graph by the Flotr2 javascript library to draw graphics.
Patch by Guillaume Lelarge
- Add pie graphs for session, database, user and host. Add a --quiet
option to remove debug output and --progress to show a progress bar
during log parsing
- Add pie graph for Queries by type.
- Add graph for checkpoint write buffer per hours
- Allow log parsing without any log_line_prefix and extend it to be
defined by the user. Custom log_line_prefix can be parsed using user
defined regexes with the command line options --regex-db and --regex-user.
For example the default regex of pgbadger to parse user and db name
from log_line_prefix can be written like this:
pgbadger -l mylogfile.log --regex-user="user=([^,]*)," \
--regex-db="db=([^,]*)"
- Separe log_line_prefix from log level part in the parser to extend
log_line_prefix parsing
- If there is just one argument, assume it is the logfile and use
default value for all other parameters
- Add autodetection of log format (syslog or stderr) if none is given
with option -f
- Add --outfile option to dump output to a file instead of stdout.
Default filename is out.html or out.txt following the output format.
To dump to stdout set filename to -
- Add --version command line option to show current pgbadger version.
Bug fix:
- Rearrange x and y axis
- Fix legend opacity on graphics
- Rearrange Overall stats view
- Add more "normalization" on errors messages
- Fix samples error with normalized error instead of real error message
- Fix another decimal limit issue in the average size of temporary files
- Force quiet mode when --progress is used
- Fix per sessions graphs
- Fix sort order of days/hours into hours array
- Fix sort order of days into graphics
- Remove display of locks, sessions and connections statistics when none
are available
- Fix display of empty column of checkpoint when no checkpoint was found
in log file
pgbadger-7.1/LICENSE 0000664 0000000 0000000 00000001607 12550162376 0014154 0 ustar 00root root 0000000 0000000 Copyright (c) 2012-2015, Dalibo
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose, without fee, and without a written agreement
is hereby granted, provided that the above copyright notice and this
paragraph and the following two paragraphs appear in all copies.
IN NO EVENT SHALL Dalibo BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
Dalibo HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Dalibo SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND Dalibo
HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
OR MODIFICATIONS.
pgbadger-7.1/MANIFEST 0000664 0000000 0000000 00000000121 12550162376 0014266 0 ustar 00root root 0000000 0000000 LICENSE
Makefile.PL
MANIFEST
META.yml
pgbadger
README
doc/pgBadger.pod
ChangeLog
pgbadger-7.1/META.yml 0000664 0000000 0000000 00000000516 12550162376 0014416 0 ustar 00root root 0000000 0000000 # http://module-build.sourceforge.net/META-spec.html
#XXXXXXX This is a prototype!!! It will change in the future!!! XXXXX#
name: pgBadger
version: 1.1
version_from: pgbadger
installdirs: site
recommends:
Text::CSV_XS: 0
distribution_type: script
generated_by: ExtUtils::MakeMaker version 6.17
pgbadger-7.1/Makefile.PL 0000664 0000000 0000000 00000002570 12550162376 0015121 0 ustar 00root root 0000000 0000000 use ExtUtils::MakeMaker;
# See lib/ExtUtils/MakeMaker.pm for details of how to influence
# the contents of the Makefile that is written.
use strict;
my @ALLOWED_ARGS = ('INSTALLDIRS','DESTDIR');
# Parse command line arguments and store them as environment variables
while ($_ = shift) {
my ($k,$v) = split(/=/, $_, 2);
if (grep(/^$k$/, @ALLOWED_ARGS)) {
$ENV{$k} = $v;
}
}
$ENV{DESTDIR} =~ s/\/$//;
# Default install path
my $DESTDIR = $ENV{DESTDIR} || '';
my $INSTALLDIRS = $ENV{INSTALLDIRS} || 'site';
my %merge_compat = ();
if ($ExtUtils::MakeMaker::VERSION >= 6.46) {
%merge_compat = (
'META_MERGE' => {
resources => {
homepage => 'http://projects.dalibo.org/pgbadger',
repository => {
type => 'git',
git => 'git@github.com:dalibo/pgbadger.git',
web => 'https://github.com/dalibo/pgbadger',
},
},
}
);
}
WriteMakefile(
'DISTNAME' => 'pgbadger',
'NAME' => 'pgBadger',
'VERSION_FROM' => 'pgbadger',
'dist' => {
'COMPRESS'=>'gzip -9f', 'SUFFIX' => 'gz',
'ZIP'=>'/usr/bin/zip','ZIPFLAGS'=>'-rl'
},
'AUTHOR' => 'Gilles Darold (gilles@darold.net)',
'ABSTRACT' => 'pgBadger - PostgreSQL log analysis report',
'EXE_FILES' => [ qw(pgbadger) ],
'MAN1PODS' => { 'doc/pgBadger.pod' => 'blib/man1/pgbadger.1p' },
'DESTDIR' => $DESTDIR,
'INSTALLDIRS' => $INSTALLDIRS,
'clean' => {},
%merge_compat
);
pgbadger-7.1/README 0000664 0000000 0000000 00000067243 12550162376 0014037 0 ustar 00root root 0000000 0000000 NAME
pgBadger - a fast PostgreSQL log analysis report
SYNOPSIS
Usage: pgbadger [options] logfile [...]
PostgreSQL log analyzer with fully detailed reports and graphs.
Arguments:
logfile can be a single log file, a list of files, or a shell command
returning a list of files. If you want to pass log content from stdin
use - as filename. Note that input from stdin will not work with csvlog.
You can also use a file containing a list of log file to parse, see -L
command line option.
Options:
-a | --average minutes : number of minutes to build the average graphs of
queries and connections. Default 5 minutes.
-A | --histo-avg minutes: number of minutes to build the histogram graphs
of queries. Default 60 minutes.
-b | --begin datetime : start date/time for the data to be parsed in log.
-B | --bar-graph : use bar graph instead of line by default.
-c | --dbclient host : only report on entries for the given client host.
-C | --nocomment : remove comments like /* ... */ from queries.
-d | --dbname database : only report on entries for the given database.
-D | --dns-resolv : client ip addresses are replaced by their DNS name.
Be warned that this can really slow down pgBadger.
-e | --end datetime : end date/time for the data to be parsed in log.
-f | --format logtype : possible values: syslog, syslog2, stderr and csv.
Default: stderr.
-G | --nograph : disable graphs on HTML output. Enabled by default.
-h | --help : show this message and exit.
-i | --ident name : programname used as syslog ident. Default: postgres
-I | --incremental : use incremental mode, reports will be generated by
days in a separate directory, --outdir must be set.
-j | --jobs number : number of jobs to run at same time. Default is 1,
run as single process.
-J | --Jobs number : number of log file to parse in parallel. Default
is 1, run as single process.
-l | --last-parsed file: allow incremental log parsing by registering the
last datetime and line parsed. Useful if you want
to watch errors since last run or if you want one
report per day with a log rotated each week.
-L | --logfile-list file : file containing a list of log files to parse.
-m | --maxlength size : maximum length of a query, it will be restricted to
the given size. Default: no truncate
-M | --no-multiline : do not collect multiline statement to avoid garbage
especially on errors that generate a huge report.
-n | --nohighlight : disable SQL code highlighting.
-N | --appname name : only report on entries for given application name
-o | --outfile filename: define the filename for the output. Default depends
on the output format: out.html, out.txt, out.bin,
out.json or out.tsung.
With module JSON::XS installed, you can output file
in JSON format either.
To dump output to stdout use - as filename.
-O | --outdir path : directory where out file must be saved.
-p | --prefix string : the value of your custom log_line_prefix as
defined in your postgresql.conf. Only use it if you
aren't using one of the standard prefixes specified
in the pgBadger documentation, such as if your
prefix includes additional variables like client ip
or application name. See examples below.
-P | --no-prettify : disable SQL queries prettify formatter.
-q | --quiet : don't print anything to stdout, not even a progress
bar.
-r | --remote-host ip : set the host where to execute the cat command on
the remote logfile to parse the file locally.
-R | --retention N : number of weeks to keep in incremental mode. Default
to 0, disabled. Used to set the number of weeks to
keep in the output directory. Older weeks and days
directories are automatically removed.
-s | --sample number : number of query samples to store. Default: 3.
-S | --select-only : only report SELECT queries.
-t | --top number : number of queries to store/display. Default: 20.
-T | --title string : change title of the HTML page report.
-u | --dbuser username : only report on entries for the given user.
-U | --exclude-user username : exclude entries for the specified user from
report.
-v | --verbose : enable verbose or debug mode. Disabled by default.
-V | --version : show pgBadger version and exit.
-w | --watch-mode : only report errors just like logwatch could do.
-x | --extension : output format. Values: text, html, bin, json or
tsung. Default: html
-X | --extra-files : in incremental mode allow pgbadger to write CSS and
JS files in the output directory as separate files.
-z | --zcat exec_path : set the full path to the zcat program. Use it if
zcat or bzcat or unzip is not in your path.
--pie-limit num : pie data lower than num% will show a sum instead.
--exclude-query regex : any query matching the given regex will be excluded
from the report. For example: "^(VACUUM|COMMIT)"
You can use this option multiple times.
--exclude-file filename: path of the file which contains all the regex to
use to exclude queries from the report. One regex
per line.
--include-query regex : any query that does not match the given regex will
be excluded from the report. You can use this
option multiple times. For example: "(tbl1|tbl2)".
--include-file filename: path of the file which contains all the regex of
the queries to include from the report. One regex
per line.
--disable-error : do not generate error report.
--disable-hourly : do not generate hourly report.
--disable-type : do not generate report of queries by type, database
or user.
--disable-query : do not generate query reports (slowest, most
frequent, queries by users, by database, ...).
--disable-session : do not generate session report.
--disable-connection : do not generate connection report.
--disable-lock : do not generate lock report.
--disable-temporary : do not generate temporary report.
--disable-checkpoint : do not generate checkpoint/restartpoint report.
--disable-autovacuum : do not generate autovacuum report.
--charset : used to set the HTML charset to be used.
Default: utf-8.
--csv-separator : used to set the CSV field separator, default: ,
--exclude-time regex : any timestamp matching the given regex will be
excluded from the report. Example: "2013-04-12 .*"
You can use this option multiple times.
--exclude-appname name : exclude entries for the specified application name
from report. Example: "pg_dump".
--exclude-line regex : pgbadger will start to exclude any log entry that
matches the given regex. Can be used multiple
times.
--anonymize : obscure all literals in queries, useful to hide
confidential data.
--noreport : prevent pgbadger from creating reports in incremental
mode.
--log-duration : force pgbadger to associate log entries generated
by both log_duration = on and log_statement = 'all'
--enable-checksum : used to add a md5 sum under each query report.
pgBadger is able to parse a remote log file using a passwordless ssh
connection. Use the -r or --remote-host to set the host ip address or
hostname. There's also some additional options to fully control the ssh
connection.
--ssh-program ssh path to the ssh program to use. Default: ssh.
--ssh-user username connection login name. Default to running user.
--ssh-identity file path to the identity file to use.
--ssh-timeout second timeout to ssh connection failure. Default 10 secs.
--ssh-options options list of -o options to use for the ssh connection.
Options always used:
-o ConnectTimeout=$ssh_timeout
-o PreferredAuthentications=hostbased,publickey
Examples:
pgbadger /var/log/postgresql.log
pgbadger /var/log/postgres.log.2.gz /var/log/postgres.log.1.gz
/var/log/postgres.log
pgbadger /var/log/postgresql/postgresql-2012-05-*
pgbadger --exclude-query="^(COPY|COMMIT)" /var/log/postgresql.log
pgbadger -b "2012-06-25 10:56:11" -e "2012-06-25 10:59:11"
/var/log/postgresql.log
cat /var/log/postgres.log | pgbadger -
# Log prefix with stderr log output
perl pgbadger --prefix '%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h'
/pglog/postgresql-2012-08-21*
perl pgbadger --prefix '%m %u@%d %p %r %a : ' /pglog/postgresql.log
# Log line prefix with syslog log output
perl pgbadger --prefix 'user=%u,db=%d,client=%h,app=%a'
/pglog/postgresql-2012-08-21*
# Use my 8 CPUs to parse my 10GB file faster, much faster
perl pgbadger -j 8 /pglog/postgresql-9.1-main.log
Generate Tsung sessions XML file with select queries only:
perl pgbadger -S -o sessions.tsung --prefix '%t [%p]: [%l-1] user=%u,db=%d ' /pglog/postgresql-9.1.log
Reporting errors every week by cron job:
30 23 * * 1 /usr/bin/pgbadger -q -w /var/log/postgresql.log -o /var/reports/pg_errors.html
Generate report every week using incremental behavior:
0 4 * * 1 /usr/bin/pgbadger -q `find /var/log/ -mtime -7 -name "postgresql.log*"`
-o /var/reports/pg_errors-`date +%F`.html -l /var/reports/pgbadger_incremental_file.dat
This supposes that your log file and HTML report are also rotated every
week.
Or better, use the auto-generated incremental reports:
0 4 * * * /usr/bin/pgbadger -I -q /var/log/postgresql/postgresql.log.1
-O /var/www/pg_reports/
will generate a report per day and per week.
In incremental mode, you can also specify the number of week to keep in
the reports:
/usr/bin/pgbadger --retention 2 -I -q /var/log/postgresql/postgresql.log.1
-O /var/www/pg_reports/
If you have a pg_dump at 23:00 and 13:00 each day during half an hour,
you can use pgbadger as follows to exclude these periods from the report:
pgbadger --exclude-time "2013-09-.* (23|13):.*" postgresql.log
This will help avoid having COPY statements, as generated by pg_dump, on
top of the list of slowest queries. You can also use --exclude-appname
"pg_dump" to solve this problem in a simpler way.
DESCRIPTION
pgBadger is a PostgreSQL log analyzer built for speed with fully
detailed reports from your PostgreSQL log file. It's a single and small
Perl script that outperforms any other PostgreSQL log analyzer.
It is written in pure Perl language and uses a javascript library
(flotr2) to draw graphs so that you don't need to install any additional
Perl modules or other packages. Furthermore, this library gives us more
features such as zooming. pgBadger also uses the Bootstrap javascript
library and the FontAwesome webfont for better design. Everything is
embedded.
pgBadger is able to autodetect your log file format (syslog, stderr or
csvlog). It is designed to parse huge log files as well as gzip
compressed files. See a complete list of features below. Supported
compressed formats are gzip, bzip2 and xz. For the last one you must have
an xz version higher than 5.05 that supports the --robot option.
All charts are zoomable and can be saved as PNG images.
You can also limit pgBadger to only report errors or remove any part of
the report using command line options.
pgBadger supports any custom format set into the log_line_prefix
directive of your postgresql.conf file as long as it at least specifies
the %t and %p patterns.
pgBadger allows parallel processing on a single log file and multiple
files through the use of the -j option and the number of CPUs as value.
If you want to save system performance you can also use log_duration
instead of log_min_duration_statement to have reports on duration and
number of queries only.
FEATURE
pgBadger reports everything about your SQL queries:
Overall statistics
The most frequent waiting queries.
Queries that waited the most.
Queries generating the most temporary files.
Queries generating the largest temporary files.
The slowest queries.
Queries that took up the most time.
The most frequent queries.
The most frequent errors.
Histogram of query times.
Histogram of sessions times.
Users involved in top queries.
Applications involved in top queries.
Queries generating the most cancellation.
Queries most cancelled.
The following reports are also available with hourly charts divided by
periods of five minutes:
SQL queries statistics.
Temporary file statistics.
Checkpoints statistics.
Autovacuum and autoanalyze statistics.
Cancelled queries.
Error events (panic, fatal, error and warning).
There's also some pie reports of distribution about:
Locks statistics.
Queries by type (select/insert/update/delete).
Distribution of queries type per database/application
Sessions per database/user/client/application.
Connections per database/user/client/application.
Autovacuum and autoanalyze per table.
Queries per user and total duration per user.
All charts are zoomable and can be saved as PNG images. SQL queries
reported are highlighted and beautified automatically.
You can also have incremental reports with one report per day and a
cumulative report per week. Two multiprocess modes are available to
speed up log parsing, one using one core per log file, and the second to
use multiple core to parse a single file. Both modes can be combined.
Histogram granularity can be adjusted using the -A command line option.
By default it will report the mean of each top query/error occurring
per hour, but you can specify the granularity down to the minute.
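For example (a hypothetical invocation, adjust the log path to your setup),
the following would build the histograms with a one minute granularity
instead of the default 60 minutes:
    pgbadger -A 1 /var/log/postgresql/postgresql.log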
pgBadger can also be used in a central place to parse remote log files
using a passwordless SSH connection. This mode can be used with
compressed files and in multiprocess-per-file mode (-J) but cannot be
used with CSV log format.
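For example, assuming passwordless SSH access to a hypothetical host
192.168.1.10, a remote log could be parsed like this:
    pgbadger -r 192.168.1.10 /var/log/postgresql/postgresql.log.1 -o remote_report.html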
REQUIREMENT
pgBadger comes as a single Perl script - you do not need anything other
than a modern Perl distribution. Charts are rendered using a Javascript
library so you don't need anything. Your browser will do all the work.
If you planned to parse PostgreSQL CSV log files you might need some
Perl Modules:
Text::CSV_XS - to parse PostgreSQL CSV log files.
This module is optional, if you don't have PostgreSQL log in the CSV
format you don't need to install it.
If you want to export statistics as JSON file you need an additional
Perl module:
JSON::XS - JSON serialising/deserialising, done correctly and fast
This module is optional, if you don't select the json output format you
don't need to install it.
Compressed log file format is autodetected from the file extension. If
pgBadger finds a gz extension it will use the zcat utility, with a bz2
extension it will use bzcat and if the file extension is zip or xz then
the unzip or xz utilities will be used.
If those utilities are not found in the PATH environment variable then
use the --zcat command line option to change this path. For example:
--zcat="/usr/local/bin/gunzip -c" or --zcat="/usr/local/bin/bzip2 -dc"
--zcat="C:\tools\unzip -p"
By default pgBadger will use the zcat, bzcat and unzip utilities
following the file extension. If you use the default compression format
autodetection you can mix gz, bz2, xz or zip files. Specifying a
custom value to the --zcat option will remove this feature of mixed
compressed formats.
Note that multiprocessing can not be used with compressed files or CSV
files as well as under Windows platform.
INSTALLATION
Download the tarball from github and unpack the archive as follow:
tar xzf pgbadger-7.x.tar.gz
cd pgbadger-7.x/
perl Makefile.PL
make && sudo make install
This will copy the Perl script pgbadger to /usr/local/bin/pgbadger by
default and the man page into /usr/local/share/man/man1/pgbadger.1.
Those are the default installation directories for 'site' install.
If you want to install all under /usr/ location, use INSTALLDIRS='perl'
as an argument of Makefile.PL. The script will be installed into
/usr/bin/pgbadger and the manpage into /usr/share/man/man1/pgbadger.1.
For example, to install everything just like Debian does, proceed as
follows:
perl Makefile.PL INSTALLDIRS=vendor
By default INSTALLDIRS is set to site.
POSTGRESQL CONFIGURATION
You must enable and set some configuration directives in your
postgresql.conf before starting.
You must first enable SQL query logging to have something to parse:
log_min_duration_statement = 0
Here every statement will be logged, on busy server you may want to
increase this value to only log queries with a higher duration time.
Note that if you have log_statement set to 'all' nothing will be logged
through directive log_min_duration_statement. See next chapter for more
information.
With 'stderr' log format, log_line_prefix must be at least:
log_line_prefix = '%t [%p]: [%l-1] '
Log line prefix could add user, database name, application name and
client ip address as follows:
log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '
or for syslog log file format:
log_line_prefix = 'user=%u,db=%d,app=%a,client=%h '
Log line prefix for stderr output could also be:
log_line_prefix = '%t [%p]: [%l-1] db=%d,user=%u,app=%a,client=%h '
or for syslog output:
log_line_prefix = 'db=%d,user=%u,app=%a,client=%h '
You need to enable other parameters in postgresql.conf to get more
information from your log files:
log_checkpoints = on
log_connections = on
log_disconnections = on
log_lock_waits = on
log_temp_files = 0
log_autovacuum_min_duration = 0
Do not enable log_statement as its log format will not be parsed by
pgBadger.
Of course your log messages should be in English without locale support:
lc_messages='C'
but this is not only recommended by pgBadger.
Note: the session line [%l-1] is just used to match the default prefix
for "stderr". The -1 has no real purpose and basically is not used in
pgBadger statistics / graphs. You can safely remove it from the
log_line_prefix but you will need to set the --prefix command line
option.
log_min_duration_statement, log_duration and log_statement
If you want full statistics reports you must set
log_min_duration_statement to 0 or more milliseconds.
If you just want to report duration and number of queries and don't want
all details about queries, set log_min_duration_statement to -1 to
disable it and enable log_duration in your postgresql.conf file. If you
want to add the most common request report you can either choose to set
log_min_duration_statement to a higher value or choose to enable
log_statement.
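For example, a minimal postgresql.conf setup for such duration-only reports
would be:
    log_min_duration_statement = -1
    log_duration = on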
Enabling log_min_duration_statement will add reports about slowest
queries and queries that took up the most time. Take care that if you
have log_statement set to 'all' nothing will be logged with
log_line_prefix.
PARALLEL PROCESSING
To enable parallel processing you just have to use the -j N option where
N is the number of cores you want to use.
pgbadger will then proceed as follow:
for each log file
chunk size = int(file size / N)
look at start/end offsets of these chunks
fork N processes and seek to the start offset of each chunk
each process will terminate when the parser reach the end offset
of its chunk
each process write stats into a binary temporary file
wait for all children has terminated
All binary temporary files generated will then be read and loaded into
memory to build the html output.
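As an illustration only (this is not pgBadger's actual code), the chunking
idea described above can be sketched in a few lines of Perl, aligning each
chunk boundary on a full log line before the workers are forked:
    #!/usr/bin/perl
    # Sketch: compute N chunk offsets aligned on line boundaries.
    use strict;
    use warnings;
    my ($file, $n) = (shift, shift || 4);
    die "usage: $0 logfile [jobs]\n" unless defined $file;
    my $size = -s $file or die "cannot stat $file\n";
    my $chunk = int($size / $n);
    open my $fh, '<', $file or die "cannot open $file: $!\n";
    my @offsets = (0);
    for my $i (1 .. $n - 1) {
        seek $fh, $i * $chunk, 0;   # jump near the theoretical boundary
        <$fh>;                      # skip the partial line we landed on
        push @offsets, tell $fh;    # the next chunk starts on a full line
    }
    push @offsets, $size;
    close $fh;
    # Each of the N forked workers would then parse [$offsets[$i], $offsets[$i+1])
    printf "chunk %d: bytes %d .. %d\n", $_ + 1, $offsets[$_], $offsets[$_ + 1]
        for 0 .. $n - 1;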
With that method, at start/end of chunks pgbadger may truncate or omit a
maximum of N queries per log file, which is an insignificant gap if you
have millions of queries in your log file. The chance that the query
you were looking for is lost is near 0, which is why I think this
gap is livable. Most of the time the query is counted twice but
truncated.
When you have a lot of small log files and a lot of CPUs it is speedier to
dedicate one core to one log file at a time. To enable this behavior you
have to use option -J N instead. With 200 log files of 10MB each the use
of the -J option starts being really interesting with 8 cores. Using this
method you will be sure not to lose any queries in the reports.
Here is a benchmark done on a server with 8 CPUs and a single file of
9.5GB.
Option | 1 CPU | 2 CPU | 4 CPU | 8 CPU
--------+---------+-------+-------+------
-j | 1h41m18 | 50m25 | 25m39 | 15m58
-J | 1h41m18 | 54m28 | 41m16 | 34m45
With 200 log files of 10MB each and a total of 2GB the results are
slightly different:
Option | 1 CPU | 2 CPU | 4 CPU | 8 CPU
--------+-------+-------+-------+------
-j | 20m15 | 9m56 | 5m20 | 4m20
-J | 20m15 | 9m49 | 5m00 | 2m40
So it is recommended to use -j unless you have hundreds of small log files
and can use at least 8 CPUs.
IMPORTANT: when you are using parallel parsing pgbadger will generate a
lot of temporary files in the /tmp directory and will remove them at the
end, so do not remove those files while pgbadger is running. They
are all named with the following template tmp_pgbadgerXXXX.bin so they
can be easily identified.
INCREMENTAL REPORTS
pgBadger includes an automatic incremental report mode using option -I or
--incremental. When running in this mode, pgBadger will generate one
report per day and a cumulative report per week. Output is first done in
binary format into the mandatory output directory (see option -O or
--outdir), then in HTML format for daily and weekly reports with a main
index file.
The main index file will show a dropdown menu per week with a link to
the week report and links to daily reports of this week.
For example, if you run pgBadger as follows based on a daily rotated
file:
0 4 * * * /usr/bin/pgbadger -I -q /var/log/postgresql/postgresql.log.1 \
-O /var/www/pg_reports/
you will have all daily and weekly reports for the full running period.
In this mode pgBadger will create an automatic incremental file in the
output directory, so you don't have to use the -l option unless you want
to change the path of that file. This means that you can run pgBadger in
this mode each day on a log file rotated each week, and it will not count
the log entries twice.
To save disk space you may want to use the -X or --extra-files command
line option to force pgBadger to write javascript and css to separate
files in the output directory. The resources will then be loaded using
script and link tag.
BINARY FORMAT
Using the binary format it is possible to create custom incremental and
cumulative reports. For example, if you want to refresh a pgbadger
report each hour from a daily PostgreSQL log file, you can proceed by
running the following commands each hour:
pgbadger --last-parsed .pgbadger_last_state_file -o sunday/hourX.bin /var/log/pgsql/postgresql-Sun.log
to generate the incremental data files in binary format. And to generate
the fresh HTML report from that binary file:
pgbadger sunday/*.bin
Or another example: if you have one log file per hour and you want
reports to be rebuilt each time the log file is switched. Proceed as
follows:
pgbadger -o day1/hour01.bin /var/log/pgsql/pglog/postgresql-2012-03-23_10.log
pgbadger -o day1/hour02.bin /var/log/pgsql/pglog/postgresql-2012-03-23_11.log
pgbadger -o day1/hour03.bin /var/log/pgsql/pglog/postgresql-2012-03-23_12.log
...
When you want to refresh the HTML report, for example each time after a
new binary file is generated, just do the following:
pgbadger -o day1_report.html day1/*.bin
Adjust the commands following your needs.
JSON FORMAT
JSON format is good for sharing data with other languages, which makes
it easy to integrate pgBadger's results into other monitoring tools like
Cacti or Graphite.
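As an illustration, a small Perl script could load a report generated with
-x json and list its top-level sections before feeding them to such a tool.
This is only a sketch: it assumes the document decodes to a JSON object and
does not rely on any particular key name, since those depend on the pgBadger
version:
    #!/usr/bin/perl
    # Sketch: inspect a pgBadger JSON report (e.g. pgbadger -x json -o out.json ...)
    use strict;
    use warnings;
    use JSON::XS;
    my $file = shift || 'out.json';
    open my $fh, '<', $file or die "cannot open $file: $!\n";
    my $json = do { local $/; <$fh> };   # slurp the whole report
    close $fh;
    my $data = JSON::XS->new->utf8->decode($json);
    print "$_\n" for sort keys %$data;   # list the report sections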
AUTHORS
pgBadger is an original work from Gilles Darold.
The pgBadger logo is an original creation of Damien Clochard.
The pgBadger v4.x design comes from the "Art is code" company.
This web site is a work of Gilles Darold.
pgBadger is maintained by Gilles Darold, the good folks at Dalibo, and
every one who wants to contribute.
Many people have contributed to pgBadger, they are all quoted in the
Changelog file.
LICENSE
pgBadger is free software distributed under the PostgreSQL Licence.
Copyright (c) 2012-2015, Dalibo
A modified version of the SQL::Beautify Perl Module is embedded in
pgBadger with copyright (C) 2009 by Jonas Kramer and is published under
the terms of the Artistic License 2.0.
pgbadger-7.1/doc/ 0000775 0000000 0000000 00000000000 12550162376 0013710 5 ustar 00root root 0000000 0000000 pgbadger-7.1/doc/pgBadger.pod 0000664 0000000 0000000 00000061724 12550162376 0016141 0 ustar 00root root 0000000 0000000 =head1 NAME
pgBadger - a fast PostgreSQL log analysis report
=head1 SYNOPSIS
Usage: pgbadger [options] logfile [...]
PostgreSQL log analyzer with fully detailed reports and graphs.
Arguments:
logfile can be a single log file, a list of files, or a shell command
returning a list of files. If you want to pass log content from stdin
use - as filename. Note that input from stdin will not work with csvlog.
You can also use a file containing a list of log file to parse, see -L
command line option.
Options:
-a | --average minutes : number of minutes to build the average graphs of
queries and connections. Default 5 minutes.
-A | --histo-avg minutes: number of minutes to build the histogram graphs
of queries. Default 60 minutes.
-b | --begin datetime : start date/time for the data to be parsed in log.
-B | --bar-graph : use bar graph instead of line by default.
-c | --dbclient host : only report on entries for the given client host.
-C | --nocomment : remove comments like /* ... */ from queries.
-d | --dbname database : only report on entries for the given database.
-D | --dns-resolv : client ip addresses are replaced by their DNS name.
Be warned that this can really slow down pgBadger.
-e | --end datetime : end date/time for the data to be parsed in log.
-f | --format logtype : possible values: syslog, syslog2, stderr and csv.
Default: stderr.
-G | --nograph : disable graphs on HTML output. Enabled by default.
-h | --help : show this message and exit.
-i | --ident name : programname used as syslog ident. Default: postgres
-I | --incremental : use incremental mode, reports will be generated by
days in a separate directory, --outdir must be set.
-j | --jobs number : number of jobs to run at same time. Default is 1,
run as single process.
-J | --Jobs number : number of log file to parse in parallel. Default
is 1, run as single process.
-l | --last-parsed file: allow incremental log parsing by registering the
last datetime and line parsed. Useful if you want
to watch errors since last run or if you want one
report per day with a log rotated each week.
-L | --logfile-list file : file containing a list of log files to parse.
-m | --maxlength size : maximum length of a query, it will be restricted to
the given size. Default: no truncate
-M | --no-multiline : do not collect multiline statement to avoid garbage
especially on errors that generate a huge report.
-n | --nohighlight : disable SQL code highlighting.
-N | --appname name : only report on entries for given application name
-o | --outfile filename: define the filename for the output. Default depends
on the output format: out.html, out.txt, out.bin,
out.json or out.tsung.
With the JSON::XS module installed, you can also output the
file in JSON format.
To dump output to stdout use - as filename.
-O | --outdir path : directory where out file must be saved.
-p | --prefix string : the value of your custom log_line_prefix as
defined in your postgresql.conf. Only use it if you
aren't using one of the standard prefixes specified
in the pgBadger documentation, such as if your
prefix includes additional variables like client ip
or application name. See examples below.
-P | --no-prettify : disable SQL queries prettify formatter.
-q | --quiet : don't print anything to stdout, not even a progress
bar.
-r | --remote-host ip : set the host where to execute the cat command on
the remote logfile to parse the file locally.
-R | --retention N : number of weeks to keep in incremental mode. Default
to 0, disabled. Used to set the number of weeks to
keep in the output directory. Older weeks and days
directories are automatically removed.
-s | --sample number : number of query samples to store. Default: 3.
-S | --select-only : only report SELECT queries.
-t | --top number : number of queries to store/display. Default: 20.
-T | --title string : change title of the HTML page report.
-u | --dbuser username : only report on entries for the given user.
-U | --exclude-user username : exclude entries for the specified user from
report.
-v | --verbose : enable verbose or debug mode. Disabled by default.
-V | --version : show pgBadger version and exit.
-w | --watch-mode : only report errors just like logwatch could do.
-x | --extension : output format. Values: text, html, bin, json or
tsung. Default: html
-X | --extra-files : in incremental mode allow pgbadger to write CSS and
JS files in the output directory as separate files.
-z | --zcat exec_path : set the full path to the zcat program. Use it if
zcat or bzcat or unzip is not in your path.
--pie-limit num : pie data lower than num% will show a sum instead.
--exclude-query regex : any query matching the given regex will be excluded
from the report. For example: "^(VACUUM|COMMIT)"
You can use this option multiple times.
--exclude-file filename: path of the file which contains all the regex to
use to exclude queries from the report. One regex
per line.
--include-query regex : any query that does not match the given regex will
be excluded from the report. You can use this
option multiple times. For example: "(tbl1|tbl2)".
--include-file filename: path of the file which contains all the regex of
the queries to include from the report. One regex
per line.
--disable-error : do not generate error report.
--disable-hourly : do not generate hourly report.
--disable-type : do not generate report of queries by type, database
or user.
--disable-query : do not generate query reports (slowest, most
frequent, queries by users, by database, ...).
--disable-session : do not generate session report.
--disable-connection : do not generate connection report.
--disable-lock : do not generate lock report.
--disable-temporary : do not generate temporary report.
--disable-checkpoint : do not generate checkpoint/restartpoint report.
--disable-autovacuum : do not generate autovacuum report.
--charset : used to set the HTML charset to be used.
Default: utf-8.
--csv-separator : used to set the CSV field separator, default: ,
--exclude-time regex : any timestamp matching the given regex will be
excluded from the report. Example: "2013-04-12 .*"
You can use this option multiple times.
--exclude-appname name : exclude entries for the specified application name
from report. Example: "pg_dump".
--exclude-line regex : pgbadger will start to exclude any log entry that
matches the given regex. Can be used multiple
times.
--anonymize : obscure all literals in queries, useful to hide
confidential data.
--noreport : prevent pgbadger from creating reports in incremental
mode.
--log-duration : force pgbadger to associate log entries generated
by both log_duration = on and log_statement = 'all'
--enable-checksum : used to add a md5 sum under each query report.
pgBadger is able to parse a remote log file using a passwordless ssh connection.
Use the -r or --remote-host to set the host ip address or hostname. There's also
some additional options to fully control the ssh connection.
--ssh-program ssh path to the ssh program to use. Default: ssh.
--ssh-user username connection login name. Default to running user.
--ssh-identity file path to the identity file to use.
--ssh-timeout second timeout to ssh connection failure. Default 10 secs.
--ssh-options options list of -o options to use for the ssh connection.
Options always used:
-o ConnectTimeout=$ssh_timeout
-o PreferredAuthentications=hostbased,publickey
Examples:
pgbadger /var/log/postgresql.log
pgbadger /var/log/postgres.log.2.gz /var/log/postgres.log.1.gz
/var/log/postgres.log
pgbadger /var/log/postgresql/postgresql-2012-05-*
pgbadger --exclude-query="^(COPY|COMMIT)" /var/log/postgresql.log
pgbadger -b "2012-06-25 10:56:11" -e "2012-06-25 10:59:11"
/var/log/postgresql.log
cat /var/log/postgres.log | pgbadger -
# Log prefix with stderr log output
perl pgbadger --prefix '%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h'
/pglog/postgresql-2012-08-21*
perl pgbadger --prefix '%m %u@%d %p %r %a : ' /pglog/postgresql.log
# Log line prefix with syslog log output
perl pgbadger --prefix 'user=%u,db=%d,client=%h,app=%a'
/pglog/postgresql-2012-08-21*
# Use my 8 CPUs to parse my 10GB file faster, much faster
perl pgbadger -j 8 /pglog/postgresql-9.1-main.log
Generate Tsung sessions XML file with select queries only:
perl pgbadger -S -o sessions.tsung --prefix '%t [%p]: [%l-1] user=%u,db=%d ' /pglog/postgresql-9.1.log
Reporting errors every week by cron job:
30 23 * * 1 /usr/bin/pgbadger -q -w /var/log/postgresql.log -o /var/reports/pg_errors.html
Generate report every week using incremental behavior:
0 4 * * 1 /usr/bin/pgbadger -q `find /var/log/ -mtime -7 -name "postgresql.log*"`
-o /var/reports/pg_errors-`date +%F`.html -l /var/reports/pgbadger_incremental_file.dat
This supposes that your log file and HTML report are also rotated every week.
Or better, use the auto-generated incremental reports:
0 4 * * * /usr/bin/pgbadger -I -q /var/log/postgresql/postgresql.log.1
-O /var/www/pg_reports/
will generate a report per day and per week.
In incremental mode, you can also specify the number of week to keep in the
reports:
/usr/bin/pgbadger --retention 2 -I -q /var/log/postgresql/postgresql.log.1
-O /var/www/pg_reports/
If you have a pg_dump at 23:00 and 13:00 each day during half an hour, you can
use pgbadger as follows to exclude these periods from the report:
pgbadger --exclude-time "2013-09-.* (23|13):.*" postgresql.log
This will help avoid having COPY statements, as generated by pg_dump, on top of
the list of slowest queries. You can also use --exclude-appname "pg_dump" to
solve this problem in a simpler way.
=head1 DESCRIPTION
pgBadger is a PostgreSQL log analyzer built for speed with fully detailed
reports from your PostgreSQL log file. It's a single and small Perl script
that outperforms any other PostgreSQL log analyzer.
It is written in pure Perl language and uses a javascript library (flotr2)
to draw graphs so that you don't need to install any additional Perl modules
or other packages. Furthermore, this library gives us more features such as
zooming. pgBadger also uses the Bootstrap javascript library and the FontAwesome
webfont for better design. Everything is embedded.
pgBadger is able to autodetect your log file format (syslog, stderr or csvlog).
It is designed to parse huge log files as well as gzip compressed files. See a
complete list of features below. Supported compressed formats are gzip, bzip2
and xz. For the last one you must have an xz version higher than 5.05 that
supports the --robot option.
All charts are zoomable and can be saved as PNG images.
You can also limit pgBadger to only report errors or remove any part of the
report using command line options.
pgBadger supports any custom format set into the log_line_prefix directive of
your postgresql.conf file as long as it at least specifies the %t and %p patterns.
pgBadger allows parallel processing on a single log file and multiple files
through the use of the -j option and the number of CPUs as value.
If you want to save system performance you can also use log_duration instead of
log_min_duration_statement to have reports on duration and number of queries only.
=head1 FEATURE
pgBadger reports everything about your SQL queries:
Overall statistics
The most frequent waiting queries.
Queries that waited the most.
Queries generating the most temporary files.
Queries generating the largest temporary files.
The slowest queries.
Queries that took up the most time.
The most frequent queries.
The most frequent errors.
Histogram of query times.
Histogram of sessions times.
Users involved in top queries.
Applications involved in top queries.
Queries generating the most cancellation.
Queries most cancelled.
The following reports are also available with hourly charts divided by periods of
five minutes:
SQL queries statistics.
Temporary file statistics.
Checkpoints statistics.
Autovacuum and autoanalyze statistics.
Cancelled queries.
Error events (panic, fatal, error and warning).
There's also some pie reports of distribution about:
Locks statistics.
Queries by type (select/insert/update/delete).
Distribution of queries type per database/application
Sessions per database/user/client/application.
Connections per database/user/client/application.
Autovacuum and autoanalyze per table.
Queries per user and total duration per user.
All charts are zoomable and can be saved as PNG images. SQL queries reported are
highlighted and beautified automatically.
You can also have incremental reports with one report per day and a cumulative
report per week. Two multiprocess modes are available to speed up log parsing,
one using one core per log file, and the second using multiple cores to parse
a single file. Both modes can be combined.
Histogram granularity can be adjusted using the -A command line option. By default
it reports the mean of each top query/error occurring per hour, but you can
specify the granularity down to the minute.
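For example, to compute the histograms with a one-minute granularity instead of
the default hourly mean (the log file name is only illustrative):
pgbadger -A 1 postgresql.log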
pgBadger can also be used in a central place to parse remote log files using a
passwordless SSH connection. This mode can be used with compressed files and
with the per-file multiprocess mode (-J), but cannot be used with the CSV log format.
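For example, a possible invocation to parse a stderr log on a remote server,
where the host name and log path are placeholders to adapt to your setup, is:
pgbadger -r dbserver -f stderr /var/log/postgresql/postgresql.log
Remember that the log format (-f or --format) is mandatory when a remote host is used.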
=head1 REQUIREMENTS
pgBadger comes as a single Perl script - you do not need anything other than a modern
Perl distribution. Charts are rendered using a JavaScript library, so you don't need
anything else; your browser will do all the work.
If you plan to parse PostgreSQL CSV log files you might need some Perl modules:
Text::CSV_XS - to parse PostgreSQL CSV log files.
This module is optional; if your PostgreSQL logs are not in CSV format you don't
need to install it.
If you want to export statistics as JSON file you need an additional Perl module:
JSON::XS - JSON serialising/deserialising, done correctly and fast
This module is optional; if you don't select the json output format you don't
need to install it.
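For example, assuming JSON::XS is installed, choosing a .json output file name is
enough to get a JSON export (the log file name is only illustrative):
pgbadger -o report.json postgresql.log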
Compressed log file formats are autodetected from the file extension. If pgBadger finds
a gz extension it will use the zcat utility, with a bz2 extension it will use bzcat,
and if the file extension is zip or xz then the unzip or xz utilities will be used.
If those utilities are not found in the PATH environment variable then use the --zcat
command line option to change this path. For example:
--zcat="/usr/local/bin/gunzip -c" or --zcat="/usr/local/bin/bzip2 -dc"
--zcat="C:\tools\unzip -p"
By default pgBadger will use the zcat, bzcat and unzip utilities according to the
file extension. If you rely on the default autodetection of the compression format
you can mix gz, bz2, xz or zip files. Specifying a custom value with the --zcat
option disables this mixed compressed format feature.
Note that multiprocessing cannot be used with compressed files or CSV files,
nor on the Windows platform.
=head1 INSTALLATION
Download the tarball from github and unpack the archive as follows:
tar xzf pgbadger-7.x.tar.gz
cd pgbadger-7.x/
perl Makefile.PL
make && sudo make install
This will copy the Perl script pgbadger to /usr/local/bin/pgbadger by default and the
man page into /usr/local/share/man/man1/pgbadger.1. Those are the default installation
directories for 'site' install.
If you want to install all under /usr/ location, use INSTALLDIRS='perl' as an argument
of Makefile.PL. The script will be installed into /usr/bin/pgbadger and the manpage
into /usr/share/man/man1/pgbadger.1.
For example, to install everything just like Debian does, proceed as follows:
perl Makefile.PL INSTALLDIRS=vendor
By default INSTALLDIRS is set to site.
=head1 POSTGRESQL CONFIGURATION
You must enable and set some configuration directives in your postgresql.conf
before starting.
You must first enable SQL query logging to have something to parse:
log_min_duration_statement = 0
Here every statement will be logged; on a busy server you may want to increase
this value to only log queries with a longer duration. Note that if you
have log_statement set to 'all', nothing will be logged through the
log_min_duration_statement directive. See the next chapter for more information.
With 'stderr' log format, log_line_prefix must be at least:
log_line_prefix = '%t [%p]: [%l-1] '
The log line prefix could add the user, database name, application name and client IP
address as follows:
log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '
or for syslog log file format:
log_line_prefix = 'user=%u,db=%d,app=%a,client=%h '
Log line prefix for stderr output could also be:
log_line_prefix = '%t [%p]: [%l-1] db=%d,user=%u,app=%a,client=%h '
or for syslog output:
log_line_prefix = 'db=%d,user=%u,app=%a,client=%h '
You need to enable other parameters in postgresql.conf to get more information from your log files:
log_checkpoints = on
log_connections = on
log_disconnections = on
log_lock_waits = on
log_temp_files = 0
log_autovacuum_min_duration = 0
Do not enable log_statement as its log format will not be parsed by pgBadger.
Of course your log messages should be in English without locale support:
lc_messages='C'
but this recommendation is not specific to pgBadger.
Note: the session line [%l-1] is just used to match the default prefix for "stderr".
The -1 has no real purpose and is basically not used in pgBadger statistics / graphs.
You can safely remove it from the log_line_prefix, but then you will need to set the
--prefix command line option accordingly.
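For example, if the session line was removed from your prefix, a possible
invocation is shown below; the prefix value is only an illustration and must
match what is really set in your postgresql.conf:
pgbadger --prefix '%t [%p]: user=%u,db=%d ' /var/log/postgresql/postgresql.log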
=head1 log_min_duration_statement, log_duration and log_statement
If you want full statistics reports you must set log_min_duration_statement
to 0 or more milliseconds.
If you just want to report duration and number of queries and don't want all
details about queries, set log_min_duration_statement to -1 to disable it and
enable log_duration in your postgresql.conf file. If you want to add the most
common queries report you can either choose to set log_min_duration_statement
to a higher value or enable log_statement.
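For example, a minimal postgresql.conf fragment for duration-only reports could
look like this:
log_min_duration_statement = -1
log_duration = on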
Enabling log_min_duration_statement will add reports about the slowest queries and
the queries that took up the most time. Take care that if you have log_statement
set to 'all', nothing will be logged through log_min_duration_statement.
=head1 PARALLEL PROCESSING
To enable parallel processing you just have to use the -j N option where N is
the number of cores you want to use.
pgbadger will then proceed as follows:
for each log file
chunk size = int(file size / N)
look at start/end offsets of these chunks
fork N processes and seek to the start offset of each chunk
each process will terminate when the parser reaches the end offset
of its chunk
each process writes stats into a binary temporary file
wait until all children have terminated
All binary temporary files generated will then be read and loaded into
memory to build the html output.
With that method, at the start/end of chunks pgbadger may truncate or omit a
maximum of N queries per log file, which is an insignificant gap if you have
millions of queries in your log file. The chance that the query you were
looking for is lost is near zero, which is why this gap is acceptable. Most
of the time the query is counted twice but truncated.
When you have lots of small log files and lots of CPUs it is faster to dedicate
one core to one log file at a time. To enable this behavior you have to use
option -J N instead. With 200 log files of 10MB each, the use of the -J option
starts being really interesting with 8 cores. Using this method you will be sure
not to lose any queries in the reports.
Here is a benchmark done on a server with 8 CPUs and a single file of 9.5GB.
Option | 1 CPU | 2 CPU | 4 CPU | 8 CPU
--------+---------+-------+-------+------
-j | 1h41m18 | 50m25 | 25m39 | 15m58
-J | 1h41m18 | 54m28 | 41m16 | 34m45
With 200 log files of 10MB each and a total of 2GB the results are slightly
different:
Option | 1 CPU | 2 CPU | 4 CPU | 8 CPU
--------+-------+-------+-------+------
-j | 20m15 | 9m56 | 5m20 | 4m20
-J | 20m15 | 9m49 | 5m00 | 2m40
So it is recommended to use -j unless you have hundreds of small log files
and can use at least 8 CPUs.
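As an illustration, with the file names below being only placeholders, the two
modes are invoked as follows:
pgbadger -j 8 /var/log/pgsql/postgresql-big.log
pgbadger -J 8 /var/log/pgsql/postgresql-*.log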
IMPORTANT: when you are using parallel parsing pgbadger will generate a lot
of temporary files in the /tmp directory and will remove them at the end, so do
not remove those files while pgbadger is running. They are all named
with the following template tmp_pgbadgerXXXX.bin so they can be easily identified.
=head1 INCREMENTAL REPORTS
pgBadger includes an automatic incremental report mode using option -I or
--incremental. When running in this mode, pgBadger will generate one report
per day and a cumulative report per week. Output is first done in binary
format into the mandatory output directory (see option -O or --outdir),
then in HTML format for daily and weekly reports with a main index file.
The main index file will show a dropdown menu per week with a link to the weekly
report and links to the daily reports of that week.
For example, if you run pgBadger as follows on a daily rotated log file:
0 4 * * * /usr/bin/pgbadger -I -q /var/log/postgresql/postgresql.log.1 \
-O /var/www/pg_reports/
you will have all daily and weekly reports for the full running period.
In this mode pgBadger will create an automatic incremental file in the
output directory, so you don't have to use the -l option unless you want
to change the path of that file. This means that you can run pgBadger in
this mode each day on a log file rotated each week, and it will not count
the log entries twice.
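For example, a possible invocation that stores the last parsed position in a
custom location, with all paths being placeholders to adapt to your setup, is:
pgbadger -I -O /var/www/pg_reports/ -l /var/lib/pgbadger/LAST_PARSED /var/log/postgresql/postgresql.log.1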
To save disk space you may want to use the -X or --extra-files command line
option to force pgBadger to write javascript and css to separate files in
the output directory. The resources will then be loaded using script and
link tags.
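For example, the following invocation, where the paths are only placeholders,
writes the javascript and css once into the output directory:
pgbadger -I -X -O /var/www/pg_reports/ /var/log/postgresql/postgresql.log.1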
=head1 BINARY FORMAT
Using the binary format it is possible to create custom incremental and
cumulative reports. For example, if you want to refresh a pgbadger report
every hour from a daily PostgreSQL log file, you can run the following
commands every hour:
pgbadger --last-parsed .pgbadger_last_state_file -o sunday/hourX.bin /var/log/pgsql/postgresql-Sun.log
to generate the incremental data files in binary format. And to generate the fresh HTML
report from that binary file:
pgbadger sunday/*.bin
Another example: if you have one log file per hour and you want reports to be
rebuilt each time the log file is rotated, proceed as follows:
pgbadger -o day1/hour01.bin /var/log/pgsql/pglog/postgresql-2012-03-23_10.log
pgbadger -o day1/hour02.bin /var/log/pgsql/pglog/postgresql-2012-03-23_11.log
pgbadger -o day1/hour03.bin /var/log/pgsql/pglog/postgresql-2012-03-23_12.log
...
When you want to refresh the HTML report, for example each time after a new binary file
is generated, just do the following:
pgbadger -o day1_report.html day1/*.bin
Adjust the commands according to your needs.
=head1 JSON FORMAT
JSON format is good for sharing data with other languages, which makes it
easy to integrate pgBadger's results into other monitoring tools like Cacti
or Graphite.
=head1 AUTHORS
pgBadger is an original work from Gilles Darold.
The pgBadger logo is an original creation of Damien Clochard.
The pgBadger v4.x design comes from the "Art is code" company.
This web site is a work of Gilles Darold.
pgBadger is maintained by Gilles Darold, the good folks at Dalibo, and everyone who wants to contribute.
Many people have contributed to pgBadger; they are all listed in the ChangeLog file.
=head1 LICENSE
pgBadger is free software distributed under the PostgreSQL Licence.
Copyright (c) 2012-2015, Dalibo
A modified version of the SQL::Beautify Perl Module is embedded in pgBadger
with copyright (C) 2009 by Jonas Kramer and is published under the terms of
the Artistic License 2.0.
pgbadger-7.1/pgbadger 0000775 0000000 0000000 00003524075 12550162376 0014664 0 ustar 00root root 0000000 0000000 #!/usr/bin/env perl
#------------------------------------------------------------------------------
#
# pgBadger - Advanced PostgreSQL log analyzer
#
# This program is open source, licensed under the PostgreSQL Licence.
# For license terms, see the LICENSE file.
#------------------------------------------------------------------------------
#
# Settings in postgresql.conf
#
# You should enable SQL query logging with log_min_duration_statement >= 0
# With stderr output
# Log line prefix should be: log_line_prefix = '%t [%p]: [%l-1] '
# Log line prefix should be: log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d '
# Log line prefix should be: log_line_prefix = '%t [%p]: [%l-1] db=%d,user=%u '
# If you need reports per client IP address you can add client=%h or remote=%h
# pgbadger will also recognize the following forms:
# log_line_prefix = '%t [%p]: [%l-1] db=%d,user=%u,client=%h '
# or
# log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d,remote=%h '
# With syslog output
# Log line prefix should be: log_line_prefix = 'db=%d,user=%u '
#
# Additional information that could be collected and reported
# log_checkpoints = on
# log_connections = on
# log_disconnections = on
# log_lock_waits = on
# log_temp_files = 0
# log_autovacuum_min_duration = 0
#------------------------------------------------------------------------------
use vars qw($VERSION);
use strict qw(vars subs);
use Getopt::Long qw(:config no_ignore_case bundling);
use IO::File;
use Benchmark;
use File::Basename;
use Storable qw(store_fd fd_retrieve);
use Time::Local 'timegm_nocheck';
use POSIX qw(locale_h sys_wait_h _exit strftime);
setlocale(LC_NUMERIC, '');
setlocale(LC_ALL, 'C');
use File::Spec qw/ tmpdir /;
use File::Temp qw/ tempfile /;
use IO::Handle;
use IO::Pipe;
use FileHandle;
use Socket;
use constant EBCDIC => "\t" ne "\011";
$VERSION = '7.1';
$SIG{'CHLD'} = 'DEFAULT';
my $TMP_DIR = File::Spec->tmpdir() || '/tmp';
my %RUNNING_PIDS = ();
my @tempfiles = ();
my $parent_pid = $$;
my $interrupt = 0;
my $tmp_last_parsed = '';
my @SQL_ACTION = ('SELECT', 'INSERT', 'UPDATE', 'DELETE');
my @LATENCY_PERCENTILE = sort {$a <=> $b} (99,95,90);
my $graphid = 1;
my $NODATA = '
NO DATASET
';
my $MAX_QUERY_LENGTH = 20480;
my $terminate = 0;
my %CACHE_DNS = ();
my $DNSLookupTimeout = 1; # (in seconds)
my $EXPLAIN_URL = 'http://explain.depesz.com/?is_public=0&is_anon=0&plan=';
my @E2A = (
0, 1, 2, 3,156, 9,134,127,151,141,142, 11, 12, 13, 14, 15,
16, 17, 18, 19,157, 10, 8,135, 24, 25,146,143, 28, 29, 30, 31,
128,129,130,131,132,133, 23, 27,136,137,138,139,140, 5, 6, 7,
144,145, 22,147,148,149,150, 4,152,153,154,155, 20, 21,158, 26,
32,160,226,228,224,225,227,229,231,241,162, 46, 60, 40, 43,124,
38,233,234,235,232,237,238,239,236,223, 33, 36, 42, 41, 59, 94,
45, 47,194,196,192,193,195,197,199,209,166, 44, 37, 95, 62, 63,
248,201,202,203,200,205,206,207,204, 96, 58, 35, 64, 39, 61, 34,
216, 97, 98, 99,100,101,102,103,104,105,171,187,240,253,254,177,
176,106,107,108,109,110,111,112,113,114,170,186,230,184,198,164,
181,126,115,116,117,118,119,120,121,122,161,191,208, 91,222,174,
172,163,165,183,169,167,182,188,189,190,221,168,175, 93,180,215,
123, 65, 66, 67, 68, 69, 70, 71, 72, 73,173,244,246,242,243,245,
125, 74, 75, 76, 77, 78, 79, 80, 81, 82,185,251,252,249,250,255,
92,247, 83, 84, 85, 86, 87, 88, 89, 90,178,212,214,210,211,213,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57,179,219,220,217,218,159
);
if (EBCDIC && ord('^') == 106) { # as in the BS2000 posix-bc coded character set
$E2A[74] = 96; $E2A[95] = 159; $E2A[106] = 94; $E2A[121] = 168;
$E2A[161] = 175; $E2A[173] = 221; $E2A[176] = 162; $E2A[186] = 172;
$E2A[187] = 91; $E2A[188] = 92; $E2A[192] = 249; $E2A[208] = 166;
$E2A[221] = 219; $E2A[224] = 217; $E2A[251] = 123; $E2A[253] = 125;
$E2A[255] = 126;
}
elsif (EBCDIC && ord('^') == 176) { # as in codepage 037 on os400
$E2A[21] = 133; $E2A[37] = 10; $E2A[95] = 172; $E2A[173] = 221;
$E2A[176] = 94; $E2A[186] = 91; $E2A[187] = 93; $E2A[189] = 168;
}
my $pgbadger_logo =
'';
my $pgbadger_ico =
'data:image/x-icon;base64,
AAABAAEAIyMQAAEABAA8BAAAFgAAACgAAAAjAAAARgAAAAEABAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAgAAGRsZACgqKQA2OTcASEpJAFpdWwBoa2kAeHt5AImMigCeoZ8AsLOxAMTHxQDR1NIA
5enmAPv+/AAAAAAA///////////////////////wAAD///////////H///////////AAAP//////
//9Fq7Yv////////8AAA////////8V7u7qD////////wAAD///////8B7qWN5AL///////AAAP//
///y8Avrc3rtMCH/////8AAA/////xABvbAAAJ6kAA/////wAAD////wAG5tQAAADp6RAP////AA
AP//MQBd7C2lRESOWe5xAD//8AAA//8APO7iC+7e7u4A3uxwBf/wAAD/9Aju7iAAvu7u0QAN7ukA
7/AAAP/wCe7kAAAF7ugAAAHO6xD/8AAA//AK7CAAAAHO1AAAABnrEP/wAAD/8ArAAAAAAc7kAAAA
AIwQ//AAAP/wCjAAAAAC3uQAAAAAHBCf8AAA//AIEBVnIATu5gAXZhAFEP/wAAD/8AIAqxdwBu7p
AFoX0QIQ//AAAP/wAAPsBCAL7u4QBwfmAAD/8AAA//AAA8owAC7u7lAAKbYAAJ/wAAD/8AAAAAAA
fu7uwAAAAAAA//AAAP/wAAAAAADu7u7jAAAAAAD/8AAA//AAAAAABe7u7uoAAAAAAP/wAAD/8AAA
AAAL7u7u7QAAAAAAn/AAAP/wAAAAAB3u7u7uYAAAAAD/8AAA//MAAAAATu7u7u6QAAAAAP/wAAD/
/wAAAAAM7u7u7TAAAAAD//AAAP//IQAAAAKu7u7UAAAAAB//8AAA////IAAAAAju7BAAAAAP///w
AAD////2AAA1je7ulUAAA/////AAAP/////xEAnO7u7pIAH/////8AAA//////9CABju6iACP///
///wAAD////////wAAggAP////////AAAP////////8wAAA/////////8AAA///////////w////
///////wAAD///////////////////////AAAP/////gAAAA//+//+AAAAD//Af/4AAAAP/4A//g
AAAA//AA/+AAAAD/oAA/4AAAAP8AAB/gAAAA/gAAD+AAAADwAAAB4AAAAPAAAADgAAAA4AAAAGAA
AADgAAAA4AAAAOAAAADgAAAA4AAAAOAAAADgAAAAYAAAAOAAAADgAAAA4AAAAOAAAADgAAAA4AAA
AOAAAABgAAAA4AAAAOAAAADgAAAA4AAAAOAAAADgAAAA4AAAAGAAAADgAAAA4AAAAOAAAADgAAAA
8AAAAOAAAADwAAAB4AAAAPwAAAfgAAAA/gAAD+AAAAD/gAA/4AAAAP/AAH/gAAAA//gD/+AAAAD/
/Af/4AAAAP//v//gAAAA/////+AAAAA
';
####
# method used to fork as many children as wanted
##
sub spawn
{
my $coderef = shift;
unless (@_ == 0 && $coderef && ref($coderef) eq 'CODE') {
print "usage: spawn CODEREF";
exit 0;
}
my $pid;
if (!defined($pid = fork)) {
print STDERR "Error: cannot fork: $!\n";
return;
} elsif ($pid) {
$RUNNING_PIDS{$pid} = $pid;
return; # the parent
}
# the child -- go spawn
$< = $>;
$( = $); # suid progs only
exit &$coderef();
}
# Command line options
my $zcat_cmd = 'gunzip -c';
my $zcat = $zcat_cmd;
my $bzcat = 'bunzip2 -c';
my $ucat = 'unzip -p';
my $xzcat = 'xzcat';
my $gzip_uncompress_size = "gunzip -l %f | grep -E '^\\s*[0-9]+' | awk '{print \$2}'";
my $zip_uncompress_size = "unzip -l %f | awk '{if (NR==4) print \$1}'";
my $xz_uncompress_size = "xz --robot -l %f | grep totals | awk '{print \$5}'";
my $format = '';
my $outfile = '';
my $outdir = '';
my $incremental = '';
my $extra_files = 0;
my $help = '';
my $ver = '';
my @dbname = ();
my @dbuser = ();
my @dbclient = ();
my @dbappname = ();
my @exclude_user = ();
my @exclude_appname = ();
my @exclude_line = ();
my $ident = '';
my $top = 0;
my $sample = 3;
my $extension = '';
my $maxlength = 0;
my $graph = 1;
my $nograph = 0;
my $debug = 0;
my $nohighlight = 0;
my $noprettify = 0;
my $from = '';
my $to = '';
my $quiet = 0;
my $progress = 1;
my $error_only = 0;
my @exclude_query = ();
my @exclude_time = ();
my $exclude_file = '';
my @include_query = ();
my $include_file = '';
my $disable_error = 0;
my $disable_hourly = 0;
my $disable_type = 0;
my $disable_query = 0;
my $disable_session = 0;
my $disable_connection = 0;
my $disable_lock = 0;
my $disable_temporary = 0;
my $disable_checkpoint = 0;
my $disable_autovacuum = 0;
my $avg_minutes = 5;
my $histo_avg_minutes = 60;
my $last_parsed = '';
my $report_title = '';
my $log_line_prefix = '';
my $compiled_prefix = '';
my $project_url = 'http://dalibo.github.com/pgbadger/';
my $t_min = 0;
my $t_max = 0;
my $remove_comment = 0;
my $select_only = 0;
my $tsung_queries = 0;
my $queue_size = 0;
my $job_per_file = 0;
my $charset = 'utf-8';
my $csv_sep_char = ',';
my %current_sessions = ();
my $incr_date = '';
my $last_incr_date = '';
my $anonymize = 0;
my $noclean = 0;
my $retention = 0;
my $bar_graph = 0;
my $dns_resolv = 0;
my $nomultiline = 0;
my $noreport = 0;
my $log_duration = 0;
my $logfile_list = '';
my $enable_checksum = 0;
my $NUMPROGRESS = 10000;
my @DIMENSIONS = (800, 300);
my $RESRC_URL = '';
my $img_format = 'png';
my @log_files = ();
my %prefix_vars = ();
my $remote_host = '';
my $ssh_command = '';
my $ssh_bin = 'ssh';
my $ssh_identity = '';
my $ssh_user = '';
my $ssh_timeout = 10;
my $ssh_options = "-o ConnectTimeout=$ssh_timeout -o PreferredAuthentications=hostbased,publickey";
# OBSOLETE, to be removed
# List of regex that match fatal error message that do not
# generate disconnection line in log. This is to prevent
# sessions in the sessions charts to increase continually.
# See issue #176 on github
my @session_closed_msg = (
qr/^(database|role) "[^"]+" does not exist$/,
qr/^no pg_hba.conf entry for/,
);
my $sql_prettified;
# Do not display data in pie where percentage is lower than this value
# to avoid label overlapping.
my $pie_percentage_limit = 2;
# Get the decimal separator
my $n = 5 / 2;
my $num_sep = ',';
$num_sep = ' ' if ($n =~ /,/);
# Inform the parent that it should stop iterating over the remaining files
sub stop_parsing
{
&logmsg('DEBUG', "Received interrupt signal");
$interrupt = 1;
}
# With multiprocess we need to wait for all children
sub wait_child
{
my $sig = shift;
$interrupt = 2;
print STDERR "Received terminating signal ($sig).\n";
if ($^O !~ /MSWin32|dos/i) {
1 while wait != -1;
$SIG{INT} = \&wait_child;
$SIG{TERM} = \&wait_child;
foreach my $f (@tempfiles) {
unlink("$f->[1]") if (-e "$f->[1]");
}
}
if ($last_parsed && -e "$tmp_last_parsed") {
unlink("$tmp_last_parsed");
}
if ($last_parsed && -e "$last_parsed.tmp") {
unlink("$last_parsed.tmp");
}
_exit(0);
}
$SIG{INT} = \&wait_child;
$SIG{TERM} = \&wait_child;
$SIG{USR2} = \&stop_parsing;
$| = 1;
# get the command line parameters
my $result = GetOptions(
"a|average=i" => \$avg_minutes,
"A|histo-average=i" => \$histo_avg_minutes,
"b|begin=s" => \$from,
"B|bar-graph!" => \$bar_graph,
"c|dbclient=s" => \@dbclient,
"C|nocomment!" => \$remove_comment,
"d|dbname=s" => \@dbname,
"D|dns-resolv!" => \$dns_resolv,
"e|end=s" => \$to,
"f|format=s" => \$format,
"G|nograph!" => \$nograph,
"h|help!" => \$help,
"i|ident=s" => \$ident,
"I|incremental!" => \$incremental,
"j|jobs=i" => \$queue_size,
"J|job_per_file=i" => \$job_per_file,
"l|last-parsed=s" => \$last_parsed,
"L|logfile-list=s" => \$logfile_list,
"m|maxlength=i" => \$maxlength,
"M|no-multiline!" => \$nomultiline,
"N|appname=s" => \@dbappname,
"n|nohighlight!" => \$nohighlight,
"o|outfile=s" => \$outfile,
"O|outdir=s" => \$outdir,
"p|prefix=s" => \$log_line_prefix,
"P|no-prettify!" => \$noprettify,
"q|quiet!" => \$quiet,
"r|remote-host=s" => \$remote_host,
'R|retention=i' => \$retention,
"s|sample=i" => \$sample,
"S|select-only!" => \$select_only,
"t|top=i" => \$top,
"T|title=s" => \$report_title,
"u|dbuser=s" => \@dbuser,
"U|exclude-user=s" => \@exclude_user,
"v|verbose!" => \$debug,
"V|version!" => \$ver,
"w|watch-mode!" => \$error_only,
"x|extension=s" => \$extension,
"X|extra-files!" => \$extra_files,
"z|zcat=s" => \$zcat,
"pie-limit=i" => \$pie_percentage_limit,
"image-format=s" => \$img_format,
"exclude-query=s" => \@exclude_query,
"exclude-file=s" => \$exclude_file,
"exclude-appname=s" => \@exclude_appname,
"include-query=s" => \@include_query,
"exclude-line=s" => \@exclude_line,
"include-file=s" => \$include_file,
"disable-error!" => \$disable_error,
"disable-hourly!" => \$disable_hourly,
"disable-type!" => \$disable_type,
"disable-query!" => \$disable_query,
"disable-session!" => \$disable_session,
"disable-connection!" => \$disable_connection,
"disable-lock!" => \$disable_lock,
"disable-temporary!" => \$disable_temporary,
"disable-checkpoint!" => \$disable_checkpoint,
"disable-autovacuum!" => \$disable_autovacuum,
"charset=s" => \$charset,
"csv-separator=s" => \$csv_sep_char,
"exclude-time=s" => \@exclude_time,
'ssh-command=s' => \$ssh_command,
'ssh-program=s' => \$ssh_bin,
'ssh-identity=s' => \$ssh_identity,
'ssh-option=s' => \$ssh_options,
'ssh-user=s' => \$ssh_user,
'ssh-timeout=i' => \$ssh_timeout,
'anonymize!' => \$anonymize,
'noclean!' => \$noclean,
'noreport!' => \$noreport,
'log-duration!' => \$log_duration,
'enable-checksum!' => \$enable_checksum,
);
die "FATAL: use pgbadger --help\n" if (not $result);
$report_title = &escape_html($report_title) if $report_title;
if ($ver) {
print "pgBadger version $VERSION\n";
exit 0;
}
&usage() if ($help);
# Try to load Digest::MD5 when asked
if ($enable_checksum) {
if (eval {require Digest::MD5;1} ne 1) {
die("Can not load Perl module Digest::MD5.\n");
} else {
Digest::MD5->import('md5_hex');
}
}
# Rewrite some command line arguments as lists
&compute_arg_list();
# If pgBadger must parse remote files set the ssh command
if ($remote_host) {
# If no user defined ssh command
if (!$ssh_command) {
$ssh_command = $ssh_bin || 'ssh';
$ssh_command .= " -i $ssh_identity" if ($ssh_identity);
$ssh_command .= " $ssh_options" if ($ssh_options);
if ($ssh_user) {
$ssh_command .= " $ssh_user\@$remote_host";
} else {
$ssh_command .= " $remote_host";
}
}
}
# Log files to be parsed are passed as command line arguments
if ($#ARGV >= 0) {
foreach my $file (@ARGV) {
if ($file ne '-') {
if (!$remote_host) {
die "FATAL: logfile $file must exist!\n" if not -f $file;
if (-z $file) {
print "WARNING: file $file is empty\n";
next;
}
push(@log_files, $file);
} else {
# Get files from remote host
&logmsg('DEBUG', "Looking for remote filename using command: $ssh_command \"ls $file\"");
my @rfiles = `$ssh_command "ls $file"`;
foreach my $f (@rfiles) {
push(@log_files, $f);
}
}
} else {
if ($logfile_list) {
die "FATAL: stdin input - can not be used with logfile list (-L).\n";
}
push(@log_files, $file);
}
}
}
if ($logfile_list) {
if (!-e $logfile_list) {
die "FATAL: logfile list $logfile_list must exist!\n";
}
if (not open(IN, $logfile_list)) {
die "FATAL: can not read logfile list $logfile_list, $!.\n";
}
my @files = <IN>;
close(IN);
foreach my $file (@files) {
chomp($file);
$file =~ s/\r//;
if ($file eq '-') {
die "FATAL: stdin input - can not be used with logfile list.\n";
}
if (!$remote_host) {
die "FATAL: logfile $file must exist!\n" if not -f $file;
if (-z $file) {
print "WARNING: file $file is empty\n";
next;
}
push(@log_files, $file);
} else {
# Get files from remote host
&logmsg('DEBUG', "Looking for remote filename using command: $ssh_command \"ls $file\"");
my @rfiles = `$ssh_command "ls $file"`;
foreach my $f (@rfiles) {
push(@log_files, $f);
}
}
}
}
# Logfile is a mandatory parameter
if ($#log_files < 0) {
print STDERR "FATAL: you must give a log file as command line parameter.\n\n";
&usage();
}
# Quiet mode is forced with progress bar
$progress = 0 if ($quiet);
# Set the default number minutes for queries and connections average
$avg_minutes ||= 5;
$avg_minutes = 60 if ($avg_minutes > 60);
$avg_minutes = 1 if ($avg_minutes < 1);
$histo_avg_minutes ||= 60;
$histo_avg_minutes = 60 if ($histo_avg_minutes > 60);
$histo_avg_minutes = 1 if ($histo_avg_minutes < 1);
my @avgs = ();
for (my $i = 0 ; $i < 60 ; $i += $avg_minutes) {
push(@avgs, sprintf("%02d", $i));
}
my @histo_avgs = ();
for (my $i = 0 ; $i < 60 ; $i += $histo_avg_minutes) {
push(@histo_avgs, sprintf("%02d", $i));
}
# Set error like log level regex
my $parse_regex = qr/^(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|HINT|STATEMENT|CONTEXT)/;
my $full_error_regex = qr/^(WARNING|ERROR|FATAL|PANIC|DETAIL|HINT|STATEMENT|CONTEXT)/;
my $main_error_regex = qr/^(WARNING|ERROR|FATAL|PANIC)/;
# Set syslog prefix regex
my $other_syslog_line =
qr/^(...)\s+(\d+)\s(\d+):(\d+):(\d+)(?:\s[^\s]+)?\s([^\s]+)\s([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?\s\[(\d+)\-\d+\]\s*(.*)/;
my $orphan_syslog_line = qr/^(...)\s+(\d+)\s(\d+):(\d+):(\d+)(?:\s[^\s]+)?\s([^\s]+)\s([^\s\[]+)\[(\d+)\]:/;
my $orphan_stderr_line = '';
# Simply generate a random string, thanks to Perlmonks
# Set default format
my $frmt = '';
if (!$remote_host) {
$frmt = &autodetect_format($log_files[0]);
} elsif (!$format) {
die "FATAL: you must give a log file format (-f or --format) when using remote connection.\n\n";
}
$format ||= $frmt;
if ($format eq 'syslog2') {
$other_syslog_line =
qr/^(\d+-\d+)-(\d+)T(\d+):(\d+):(\d+)(?:.[^\s]+)?\s([^\s]+)\s(?:[^\s]+\s)?(?:[^\s]+\s)?([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?\s\[(\d+)\-\d+\]\s*(.*)/;
$orphan_syslog_line = qr/^(\d+-\d+)-(\d+)T(\d+):(\d+):(\d+)(?:.[^\s]+)?\s([^\s]+)\s(?:[^\s]+\s)?(?:[^\s]+\s)?([^\s\[]+)\[(\d+)\]:/;
}
# Set default top query
$top ||= 20;
# Set the default extension and output format
if (!$extension) {
if ($outfile =~ /\.bin/i) {
$extension = 'binary';
} elsif ($outfile =~ /\.json/i) {
if (eval {require JSON::XS;1;} ne 1) {
die("Can not save output in json format, please install Perl module JSON::XS first.\n");
} else {
JSON::XS->import();
}
$extension = 'json';
} elsif ($outfile =~ /\.tsung/i) {
$extension = 'tsung';
} elsif ($outfile =~ /\.htm[l]*/i) {
$extension = 'html';
} elsif ($outfile) {
$extension = 'txt';
} else {
$extension = 'html';
}
} elsif (lc($extension) eq 'json') {
if (eval {require JSON::XS;1;} ne 1) {
die("Can not save output in json format, please install Perl module JSON::XS first.\n");
} else {
JSON::XS->import();
}
}
# Set default filename of the output file
$outfile ||= 'out.' . $extension;
&logmsg('DEBUG', "Output '$extension' reports will be written to $outfile");
# Set default syslog ident name
$ident ||= 'postgres';
# Set default pie percentage limit or fix value
$pie_percentage_limit = 0 if ($pie_percentage_limit < 0);
$pie_percentage_limit = 2 if ($pie_percentage_limit eq '');
$pie_percentage_limit = 100 if ($pie_percentage_limit > 100);
# Set default download image format
$img_format = lc($img_format);
$img_format = 'jpeg' if ($img_format eq 'jpg');
$img_format = 'png' if ($img_format ne 'jpeg');
# Extract the output directory from outfile so that graphs will
# be created in the same directory
if ($outfile ne '-') {
if (!$outdir) {
my @infs = fileparse($outfile);
if ($infs[0] ne '') {
$outdir = $infs[1];
} else {
# maybe a confusion between -O and -o
die "FATAL: output file $outfile is a directory, should be a file\nor maybe you want to use -O | --outdir option instead.\n";
}
} elsif (!-d "$outdir") {
# An output directory has been passed as command line parameter
die "FATAL: $outdir is not a directory or doesn't exist.\n";
}
$outfile = basename($outfile);
$outfile = $outdir . '/' . $outfile;
}
# Remove graph support if output is not html
$graph = 0 unless ($extension eq 'html' or $extension eq 'binary' or $extension eq 'json');
$graph = 0 if ($nograph);
# Set some default values
my $end_top = $top - 1;
$queue_size ||= 1;
$job_per_file ||= 1;
if ($^O =~ /MSWin32|dos/i) {
if ( ($queue_size > 1) || ($job_per_file > 1) ) {
print STDERR "WARNING: parallel processing is not supported on this platform.\n";
$queue_size = 1;
$job_per_file = 1;
}
}
if ($extension eq 'tsung') {
# Open filehandle
my $fh = new IO::File ">$outfile";
if (not defined $fh) {
die "FATAL: can't write to $outfile, $!\n";
}
print $fh qq{
};
$fh->close();
} else {
# Test file creation before going to parse log
my $tmpfh = new IO::File ">$outfile";
if (not defined $tmpfh) {
die "FATAL: can't write to $outfile, $!\n";
}
$tmpfh->close();
unlink($outfile) if (-e $outfile);
}
# -w and --disable-error can't go together
if ($error_only && $disable_error) {
die "FATAL: please choose between no event report and reporting events only.\n";
}
# Set default search pattern for database, user name, application name and host in log_line_prefix
my $regex_prefix_dbname = qr/db=([^,]*)/;
my $regex_prefix_dbuser = qr/user=([^,]*)/;
my $regex_prefix_dbclient = qr/(?:client|remote)=([^,]*)/;
my $regex_prefix_dbappname = qr/app=([^,]*)/;
# Set pattern to look for query type
my $action_regex = qr/^[\s\(]*(DELETE|INSERT|UPDATE|SELECT|COPY)/is;
# Loading excluded query from file if any
if ($exclude_file) {
open(IN, "$exclude_file") or die "FATAL: can't read file $exclude_file: $!\n";
my @exclq = <IN>;
close(IN);
chomp(@exclq);
map {s/\r//;} @exclq;
foreach my $r (@exclq) {
&check_regex($r, '--exclude-file');
}
push(@exclude_query, @exclq);
}
# Testing regex syntax
if ($#exclude_query >= 0) {
foreach my $r (@exclude_query) {
&check_regex($r, '--exclude-query');
}
}
# Testing regex syntax
if ($#exclude_time >= 0) {
foreach my $r (@exclude_time) {
&check_regex($r, '--exclude-time');
}
}
# Loading included query from file if any
if ($include_file) {
open(IN, "$include_file") or die "FATAL: can't read file $include_file: $!\n";
my @exclq = <IN>;
close(IN);
chomp(@exclq);
map {s/\r//;} @exclq;
foreach my $r (@exclq) {
&check_regex($r, '--include-file');
}
push(@include_query, @exclq);
}
# Testing regex syntax
if ($#include_query >= 0) {
foreach my $r (@include_query) {
&check_regex($r, '--include-query');
}
}
# Compile custom log line prefix
my @prefix_params = ();
if ($log_line_prefix) {
# Build parameters name that will be extracted from the prefix regexp
@prefix_params = &build_log_line_prefix_regex();
&check_regex($log_line_prefix, '--prefix');
if ($format eq 'syslog') {
$log_line_prefix =
'^(...)\s+(\d+)\s(\d+):(\d+):(\d+)(?:\s[^\s]+)?\s([^\s]+)\s([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?\s\[(\d+)\-\d+\]\s*'
. $log_line_prefix
. '\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT):\s+(?:[0-9A-Z]{5}:\s+)?(.*)';
$compiled_prefix = qr/$log_line_prefix/;
unshift(@prefix_params, 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_session_line');
push(@prefix_params, 't_loglevel', 't_query');
} elsif ($format eq 'syslog2') {
$format = 'syslog';
$log_line_prefix =
'^(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)(?:.[^\s]+)?\s([^\s]+)\s(?:[^\s]+\s)?(?:[^\s]+\s)?([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?\s\[(\d+)\-\d+\]\s*'
. $log_line_prefix
. '\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT):\s+(?:[0-9A-Z]{5}:\s+)?(.*)';
$compiled_prefix = qr/$log_line_prefix/;
unshift(@prefix_params, 't_year', 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_session_line');
push(@prefix_params, 't_loglevel', 't_query');
} elsif ($format eq 'stderr') {
$orphan_stderr_line = qr/$log_line_prefix/;
$log_line_prefix = '^' . $log_line_prefix . '\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT):\s+(?:[0-9A-Z]{5}:\s+)?(.*)';
$compiled_prefix = qr/$log_line_prefix/;
push(@prefix_params, 't_loglevel', 't_query');
}
} elsif ($format eq 'syslog') {
$compiled_prefix =
qr/^(...)\s+(\d+)\s(\d+):(\d+):(\d+)(?:\s[^\s]+)?\s([^\s]+)\s([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?\s\[(\d+)\-\d+\]\s*(.*?)\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT):\s+(?:[0-9A-Z]{5}:\s+)?(.*)/;
push(@prefix_params, 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_session_line',
't_logprefix', 't_loglevel', 't_query');
} elsif ($format eq 'syslog2') {
$format = 'syslog';
$compiled_prefix =
qr/^(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)(?:.[^\s]+)?\s([^\s]+)\s(?:[^\s]+\s)?(?:[^\s]+\s)?([^\s\[]+)\[(\d+)\]:(?:\s\[[^\]]+\])?\s\[(\d+)\-\d+\]\s*(.*?)\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT):\s+(?:[0-9A-Z]{5}:\s+)?(.*)/;
push(@prefix_params, 't_year', 't_month', 't_day', 't_hour', 't_min', 't_sec', 't_host', 't_ident', 't_pid', 't_session_line',
't_logprefix', 't_loglevel', 't_query');
} elsif ($format eq 'stderr') {
$compiled_prefix =
qr/^(\d+-\d+-\d+\s\d+:\d+:\d+)[\.\d]*(?: [A-Z\d]{3,6})?\s\[(\d+)\]:\s\[(\d+)\-\d+\]\s*(.*?)\s*(LOG|WARNING|ERROR|FATAL|PANIC|DETAIL|STATEMENT|HINT|CONTEXT):\s+(?:[0-9A-Z]{5}:\s+)?(.*)/;
push(@prefix_params, 't_timestamp', 't_pid', 't_session_line', 't_logprefix', 't_loglevel', 't_query');
$orphan_stderr_line = qr/^(\d+-\d+-\d+\s\d+:\d+:\d+)[\.\d]*(?: [A-Z\d]{3,6})?\s\[(\d+)\]:\s\[(\d+)\-\d+\]\s*(.*?)\s*/;
}
sub check_regex
{
my ($pattern, $varname) = @_;
eval {m/$pattern/i;};
if ($@) {
die "FATAL: '$varname' invalid regex '$pattern', $!\n";
}
}
# Check start/end date time
if ($from) {
if ($from !~ /^(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})([.]\d+([+-]\d+)?)?$/) {
die "FATAL: bad format for begin datetime, should be yyyy-mm-dd hh:mm:ss.l+tz\n";
} else {
# keep the fractional seconds (and timezone) when present, default to .0
my $fractional_seconds = $7 || '.0';
$from = "$1-$2-$3 $4:$5:$6$fractional_seconds";
}
}
if ($to) {
if ($to !~ /^(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})([.]\d+([+-]\d+)?)?$/) {
die "FATAL: bad format for ending datetime, should be yyyy-mm-dd hh:mm:ss.l+tz\n";
} else {
# keep the fractional seconds (and timezone) when present, default to .0
my $fractional_seconds = $7 || '.0';
$to = "$1-$2-$3 $4:$5:$6$fractional_seconds";
}
}
# Stores the last parsed line from log file to allow incremental parsing
my $LAST_LINE = '';
# Set the level of the data aggregator, can be minute, hour or day following the
# size of the log file.
my $LEVEL = 'hour';
# Month names
my %month_abbr = (
'Jan' => '01', 'Feb' => '02', 'Mar' => '03', 'Apr' => '04', 'May' => '05', 'Jun' => '06',
'Jul' => '07', 'Aug' => '08', 'Sep' => '09', 'Oct' => '10', 'Nov' => '11', 'Dec' => '12'
);
my %abbr_month = (
'01' => 'Jan', '02' => 'Feb', '03' => 'Mar', '04' => 'Apr', '05' => 'May', '06' => 'Jun',
'07' => 'Jul', '08' => 'Aug', '09' => 'Sep', '10' => 'Oct', '11' => 'Nov', '12' => 'Dec'
);
# Keywords variable
my @pg_keywords = qw(
ALL ANALYSE ANALYZE AND ANY ARRAY AS ASC ASYMMETRIC AUTHORIZATION BINARY BOTH CASE
CAST CHECK COLLATE COLLATION COLUMN CONCURRENTLY CONSTRAINT CREATE CROSS
CURRENT_DATE CURRENT_ROLE CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER
DEFAULT DEFERRABLE DESC DISTINCT DO ELSE END EXCEPT FALSE FETCH FOR FOREIGN FREEZE FROM
FULL GRANT GROUP HAVING ILIKE IN INITIALLY INNER INTERSECT INTO IS ISNULL JOIN LEADING
LEFT LIKE LIMIT LOCALTIME LOCALTIMESTAMP NATURAL NOT NOTNULL NULL ON ONLY OPEN OR
ORDER OUTER OVER OVERLAPS PLACING PRIMARY REFERENCES RETURNING RIGHT SELECT SESSION_USER
SIMILAR SOME SYMMETRIC TABLE THEN TO TRAILING TRUE UNION UNIQUE USER USING VARIADIC
VERBOSE WHEN WHERE WINDOW WITH
);
my @beautify_pg_keywords = qw(
ANALYSE ANALYZE CONCURRENTLY FREEZE ILIKE ISNULL LIKE NOTNULL PLACING RETURNING VARIADIC
);
# Highlight variables
my @KEYWORDS1 = qw(
ALTER ADD AUTO_INCREMENT BETWEEN BY BOOLEAN BEGIN CHANGE COLUMNS COMMIT COALESCE CLUSTER
COPY DATABASES DATABASE DATA DELAYED DESCRIBE DELETE DROP ENCLOSED ESCAPED EXISTS EXPLAIN
FIELDS FIELD FLUSH FUNCTION GREATEST IGNORE INDEX INFILE INSERT IDENTIFIED IF INHERIT
KEYS KILL KEY LINES LOAD LOCAL LOCK LOW_PRIORITY LANGUAGE LEAST LOGIN MODIFY
NULLIF NOSUPERUSER NOCREATEDB NOCREATEROLE OPTIMIZE OPTION OPTIONALLY OUTFILE OWNER PROCEDURE
PROCEDURAL READ REGEXP RENAME RETURN REVOKE RLIKE ROLE ROLLBACK SHOW SONAME STATUS
STRAIGHT_JOIN SET SEQUENCE TABLES TEMINATED TRUNCATE TEMPORARY TRIGGER TRUSTED UN$filenumLOCK
USE UPDATE UNSIGNED VALUES VARIABLES VIEW VACUUM WRITE ZEROFILL XOR
ABORT ABSOLUTE ACCESS ACTION ADMIN AFTER AGGREGATE ALSO ALWAYS ASSERTION ASSIGNMENT AT ATTRIBUTE
BACKWARD BEFORE BIGINT CACHE CALLED CASCADE CASCADED CATALOG CHAIN CHARACTER CHARACTERISTICS
CHECKPOINT CLOSE COMMENT COMMENTS COMMITTED CONFIGURATION CONNECTION CONSTRAINTS CONTENT
CONTINUE CONVERSION COST CSV CURRENT CURSOR CYCLE DAY DEALLOCATE DEC DECIMAL DECLARE DEFAULTS
DEFERRED DEFINER DELIMITER DELIMITERS DICTIONARY DISABLE DISCARD DOCUMENT DOMAIN DOUBLE EACH
ENABLE ENCODING ENCRYPTED ENUM ESCAPE EXCLUDE EXCLUDING EXCLUSIVE EXECUTE EXTENSION EXTERNAL
FIRST FLOAT FOLLOWING FORCE FORWARD FUNCTIONS GLOBAL GRANTED HANDLER HEADER HOLD
HOUR IDENTITY IMMEDIATE IMMUTABLE IMPLICIT INCLUDING INCREMENT INDEXES INHERITS INLINE INOUT INPUT
INSENSITIVE INSTEAD INT INTEGER INVOKER ISOLATION LABEL LARGE LAST LC_COLLATE LC_CTYPE
LEAKPROOF LEVEL LISTEN LOCATION LOOP MAPPING MATCH MAXVALUE MINUTE MINVALUE MODE MONTH MOVE NAMES
NATIONAL NCHAR NEXT NO NONE NOTHING NOTIFY NOWAIT NULLS OBJECT OF OFF OIDS OPERATOR OPTIONS
OUT OWNED PARSER PARTIAL PARTITION PASSING PASSWORD PLANS PRECEDING PRECISION PREPARE
PREPARED PRESERVE PRIOR PRIVILEGES QUOTE RANGE REAL REASSIGN RECHECK RECURSIVE REF REINDEX RELATIVE
RELEASE REPEATABLE REPLICA RESET RESTART RESTRICT RETURNS ROW ROWS RULE SAVEPOINT SCHEMA SCROLL SEARCH
SECOND SECURITY SEQUENCES SERIALIZABLE SERVER SESSION SETOF SHARE SIMPLE SMALLINT SNAPSHOT STABLE
STANDALONE START STATEMENT STATISTICS STORAGE STRICT SYSID SYSTEM TABLESPACE TEMP
TEMPLATE TRANSACTION TREAT TYPE TYPES UNBOUNDED UNCOMMITTED UNENCRYPTED
UNKNOWN UNLISTEN UNLOGGED UNTIL VALID VALIDATE VALIDATOR VALUE VARYING VOLATILE
WHITESPACE WITHOUT WORK WRAPPER XMLATTRIBUTES XMLCONCAT XMLELEMENT XMLEXISTS XMLFOREST XMLPARSE
XMLPI XMLROOT XMLSERIALIZE YEAR YES ZONE
);
foreach my $k (@pg_keywords) {
push(@KEYWORDS1, $k) if (!grep(/^$k$/i, @KEYWORDS1));
}
my @KEYWORDS2 = (
'ascii', 'age',
'bit_length', 'btrim',
'char_length', 'character_length', 'convert', 'chr', 'current_date', 'current_time', 'current_timestamp', 'count',
'decode', 'date_part', 'date_trunc',
'encode', 'extract',
'get_byte', 'get_bit',
'initcap', 'isfinite', 'interval',
'justify_hours', 'justify_days',
'lower', 'length', 'lpad', 'ltrim', 'localtime', 'localtimestamp',
'md5',
'now',
'octet_length', 'overlay',
'position', 'pg_client_encoding',
'quote_ident', 'quote_literal',
'repeat', 'replace', 'rpad', 'rtrim',
'substring', 'split_part', 'strpos', 'substr', 'set_byte', 'set_bit',
'trim', 'to_ascii', 'to_hex', 'translate', 'to_char', 'to_date', 'to_timestamp', 'to_number', 'timeofday',
'upper',
);
my @KEYWORDS3 = ('STDIN', 'STDOUT');
my %SYMBOLS = (
'=' => '=', '<' => '<', '>' => '>', '\|' => '|', ',' => ',', '\.' => '.', '\+' => '+', '\-' => '-', '\*' => '*',
'\/' => '/', '!=' => '!='
);
my @BRACKETS = ('(', ')');
map {$_ = quotemeta($_)} @BRACKETS;
# Inbounds of query times histogram
my @histogram_query_time = (0, 1, 5, 10, 25, 50, 100, 500, 1000, 10000);
# Inbounds of session times histogram
my @histogram_session_time = (0, 500, 1000, 30000, 60000, 600000, 1800000, 3600000, 28800000);
# Get inbounds of query times histogram
sub get_hist_inbound
{
my ($duration, @histogram) = @_;
for (my $i = 0; $i <= $#histogram; $i++) {
return $histogram[$i-1] if ($histogram[$i] > $duration);
}
return -1;
}
# Where statistics are stored
my %overall_stat = ();
my %overall_checkpoint = ();
my @top_slowest = ();
my %normalyzed_info = ();
my %error_info = ();
my %logs_type = ();
my %per_minute_info = ();
my %lock_info = ();
my %tempfile_info = ();
my %cancelled_info = ();
my %connection_info = ();
my %database_info = ();
my %application_info = ();
my %user_info = ();
my %host_info = ();
my %session_info = ();
my %conn_received = ();
my %checkpoint_info = ();
my %autovacuum_info = ();
my %autoanalyze_info = ();
my @graph_values = ();
my %cur_info = ();
my %cur_temp_info = ();
my %cur_plan_info = ();
my %cur_cancel_info = ();
my %cur_lock_info = ();
my $nlines = 0;
my %last_line = ();
our %saved_last_line = ();
my %tsung_session = ();
my @top_locked_info = ();
my @top_tempfile_info = ();
my @top_cancelled_info = ();
my %drawn_graphs = ();
my $t0 = Benchmark->new;
# Write resource files from the __DATA__ section if they have not already been copied
# and return the HTML links to those files. If --extra-files is not used, return the
# CSS and JS code to be embedded in the HTML files
my @jscode = &write_resources();
# Automatically set parameters with incremental mode
if ($incremental) {
# In incremental mode an output directory must be set
if (!$outdir) {
die "FATAL: you must specify an output directory with incremental mode, see -O or --outdir.\n"
}
# Ensure this is not a relative path
if (dirname($outdir) eq '.') {
die "FATAL: output directory ($outdir) is not an absolute path.\n";
}
# Ensure that the directory already exists
if (!-d $outdir) {
die "FATAL: output directory $outdir does not exists\n";
}
# Set default last parsed file in incremental mode
if (!$last_parsed) {
$last_parsed = $outdir . '/LAST_PARSED';
}
$outfile = 'index.html';
# Set default output format
$extension = 'binary';
} else {
# Extra files for resources are not allowed without incremental mode
$extra_files = 0;
}
# Reading last line parsed
if ($last_parsed && -e $last_parsed) {
if (open(IN, "$last_parsed")) {
my $line = <IN>;
close(IN);
($saved_last_line{datetime}, $saved_last_line{current_pos}, $saved_last_line{orig}) = split(/\t/, $line, 3);
# Preserve backward compatibility with version < 5
if ($saved_last_line{current_pos} =~ /\D/) {
$saved_last_line{orig} = $saved_last_line{current_pos} . "\t" . $saved_last_line{orig};
$saved_last_line{current_pos} = 0;
}
if ( ($format eq 'binary') || ($format eq 'csv') ) {
$saved_last_line{current_pos} = 0;
}
} else {
die "FATAL: can't read last parsed line from $last_parsed, $!\n";
}
}
$tmp_last_parsed = 'tmp_' . basename($last_parsed) if ($last_parsed);
$tmp_last_parsed = "$TMP_DIR/$tmp_last_parsed";
# Clean the incremental directory if the feature is not disabled
if (!$noclean && $saved_last_line{datetime} && $outdir) {
# Search the current week following the last parse date
$saved_last_line{datetime} =~ /^(\d+)\-(\d+)\-(\d+) /;
my $last_year = $1;
my $last_month = $2;
my $last_day = $3;
# Get the week number following the date
my $wn = &get_week_number($last_year, $last_month, $last_day);
# Get the days of the current week where binary files must be preserved
my @wdays = &get_wdays_per_month($wn - 1, "$last_year-$last_month");
# Find obsolete day directories that should be cleaned
unless(opendir(DIR, "$outdir")) {
die "Error: can't opendir $outdir: $!";
}
my @dyears = grep { $_ =~ /^\d+$/ } readdir(DIR);
closedir DIR;
my @obsolete_days = ();
foreach my $y (sort { $a <=> $b } @dyears) {
unless(opendir(DIR, "$outdir/$y")) {
die "Error: can't opendir $outdir/$y: $!";
}
my @dmonths = grep { $_ =~ /^\d+$/ } readdir(DIR);
closedir DIR;
foreach my $m (sort { $a <=> $b } @dmonths) {
unless(opendir(DIR, "$outdir/$y/$m")) {
die "Error: can't opendir $outdir/$y/$m: $!";
}
my @ddays = grep { $_ =~ /^\d+$/ } readdir(DIR);
closedir DIR;
foreach my $d (sort { $a <=> $b } @ddays) {
if ("$y-$m-$d" lt $wdays[0]) {
push(@obsolete_days, "$outdir/$y/$m/$d");
}
}
}
}
foreach my $p (@obsolete_days) {
unless(opendir(DIR, "$p")) {
die "Error: can't opendir $p: $!";
}
my @hfiles = grep { $_ =~ /\.(html|txt|tsung|json)$/i } readdir(DIR);
next if ($#hfiles == -1); # do not remove files if report file has not been generated
seekdir(DIR, 0);
my @bfiles = grep { $_ =~ /\.bin$/i } readdir(DIR);
closedir DIR;
foreach my $f (@bfiles) {
&logmsg('DEBUG', "Removing obsolete binary file: $p/$f");
unlink("$p/$f");
}
}
}
# Clear storage when a retention is specified in incremental mode
if ( $saved_last_line{datetime} && $outdir && $retention) {
# Search the current week following the last parse date
$saved_last_line{datetime} =~ /^(\d+)\-(\d+)\-(\d+) /;
my $last_year = $1;
my $last_month = $2;
my $last_day = $3;
# Get the current week number
my $wn = &get_week_number($last_year, $last_month, $last_day);
my $limit = $last_year;
if (($wn - $retention) < 1) {
$limit--;
$limit .= "52";
} else {
$limit .= sprintf("%02d", $wn - $retention);
}
# Find obsolete week directories that should be cleaned
unless(opendir(DIR, "$outdir")) {
die "Error: can't opendir $outdir: $!";
}
my @dyears = grep { $_ =~ /^\d+$/ } readdir(DIR);
closedir DIR;
my @obsolete_weeks = ();
foreach my $y (sort { $a <=> $b } @dyears) {
unless(opendir(DIR, "$outdir/$y")) {
die "Error: can't opendir $outdir/$y: $!";
}
my @weeks = grep { $_ =~ /^week-\d+$/ } readdir(DIR);
closedir DIR;
foreach my $w (sort { $a <=> $b } @weeks) {
$w =~ /^week-(\d+)$/;
if ("$y$1" lt $limit) {
&logmsg('DEBUG', "Removing obsolete week directory $outdir/$y/week-$1");
&cleanup_directory("$outdir/$y/week-$1", 1);
push(@obsolete_weeks, "$y$1");
}
}
}
# Now remove the corresponding days
foreach my $y (sort { $a <=> $b } @dyears) {
unless(opendir(DIR, "$outdir/$y")) {
die "Error: can't opendir $outdir/$y: $!";
}
my @dmonths = grep { $_ =~ /^\d+$/ } readdir(DIR);
closedir DIR;
my @rmmonths = ();
foreach my $m (sort { $a <=> $b } @dmonths) {
unless(opendir(DIR, "$outdir/$y/$m")) {
die "Error: can't opendir $outdir/$y/$m: $!";
}
my @rmdays = ();
my @ddays = grep { $_ =~ /^\d+$/ } readdir(DIR);
closedir DIR;
foreach my $d (sort { $a <=> $b } @ddays) {
my $weekNumber = sprintf("%02d", POSIX::strftime("%U", 1, 1, 1, $d, $m - 1, $y - 1900)+1);
if (grep(/^$y$weekNumber$/, @obsolete_weeks)) {
&logmsg('DEBUG', "Removing obsolete directory $outdir/$y/$m/$d");
&cleanup_directory("$outdir/$y/$m/$d", 1);
push(@rmdays, $d);
}
}
if ($#ddays == $#rmdays) {
&logmsg('DEBUG', "Removing obsolete empty directory $outdir/$y/$m");
rmdir("$outdir/$y/$m");
push(@rmmonths, $m);
}
}
if ($#dmonths == $#rmmonths) {
&logmsg('DEBUG', "Removing obsolete empty directory $outdir/$y");
rmdir("$outdir/$y");
}
}
}
# Main loop reading log files
my $global_totalsize = 0;
my @given_log_files = ( @log_files );
chomp(@given_log_files);
# Verify that the file has not changed for incremental mode
if (!$remote_host) {
if ($saved_last_line{current_pos} > 0) {
my @tmpfilelist = ();
# Remove files that have already been parsed during previous runs
foreach my $f (@given_log_files) {
if ($f eq '-') {
&logmsg('DEBUG', "waiting for log entries from stdin.");
$saved_last_line{current_pos} = 0;
push(@tmpfilelist, $f);
} elsif (!&check_file_changed($f, $saved_last_line{datetime})) {
&logmsg('DEBUG', "this file has already been parsed: $f");
} else {
push(@tmpfilelist, $f);
}
}
@given_log_files = ();
push(@given_log_files, @tmpfilelist);
}
} else {
# Disable multi process when using ssh to parse remote log
$queue_size = 1;
}
# log files must be erased when loading stats from binary format
if ($format eq 'binary') {
$queue_size = 1;
$job_per_file = 1;
}
my $pipe;
# Seeking to an old log position is not possible outside incremental mode
$saved_last_line{current_pos} = 0 if (!$last_parsed);
# Start parsing all given files using multiprocess
if ( ($#given_log_files >= 0) && (($queue_size > 1) || ($job_per_file > 1)) ) {
# Number of running process
my $child_count = 0;
# Set max number of parallel process
my $parallel_process = $queue_size;
if ($job_per_file > 1) {
$parallel_process = $job_per_file;
}
# Store total size of the log files
foreach my $logfile ( @given_log_files ) {
$global_totalsize += &get_log_file($logfile);
}
# Open a pipe for interprocess communication
my $reader = new IO::Handle;
my $writer = new IO::Handle;
$pipe = IO::Pipe->new($reader, $writer);
$writer->autoflush(1);
# Fork the logger process
if ($progress) {
spawn sub {
&multiprocess_progressbar($global_totalsize);
};
}
# Parse each log file following the multiprocess mode chosen (-j or -J)
foreach my $logfile ( @given_log_files ) {
while ($child_count >= $parallel_process) {
my $kid = waitpid(-1, WNOHANG);
if ($kid > 0) {
$child_count--;
delete $RUNNING_PIDS{$kid};
}
sleep(1);
}
# Do not use split method with compressed files
if ( ($queue_size > 1) && ($logfile !~ /\.(gz|bz2|zip|xz)$/i) ) {
# Create multiple processes to parse one log file by chunks of data
my @chunks = &split_logfile($logfile);
&logmsg('DEBUG', "The following boundaries will be used to parse file $logfile, " . join('|', @chunks));
for (my $i = 0; $i < $#chunks; $i++) {
while ($child_count >= $parallel_process) {
my $kid = waitpid(-1, WNOHANG);
if ($kid > 0) {
$child_count--;
delete $RUNNING_PIDS{$kid};
}
sleep(1);
}
die "FATAL: Abort signal received when processing to next chunk\n" if ($interrupt == 2);
last if ($interrupt);
push(@tempfiles, [ tempfile('tmp_pgbadgerXXXX', SUFFIX => '.bin', DIR => $TMP_DIR, UNLINK => 1 ) ]);
spawn sub {
&process_file($logfile, $tempfiles[-1]->[0], $chunks[$i], $chunks[$i+1], $i);
};
$child_count++;
}
} else {
# Start parsing one file per parallel process
push(@tempfiles, [ tempfile('tmp_pgbadgerXXXX', SUFFIX => '.bin', DIR => $TMP_DIR, UNLINK => 1 ) ]);
spawn sub {
&process_file($logfile, $tempfiles[-1]->[0]);
};
$child_count++;
}
die "FATAL: Abort signal received when processing next file\n" if ($interrupt == 2);
last if ($interrupt);
}
my $minproc = 1;
$minproc = 0 if (!$progress);
# Wait for all child processes to die except for the logger
while (scalar keys %RUNNING_PIDS > $minproc) {
my $kid = waitpid(-1, WNOHANG);
if ($kid > 0) {
delete $RUNNING_PIDS{$kid};
}
sleep(1);
}
# Terminate the process logger
foreach my $k (keys %RUNNING_PIDS) {
kill(10, $k);
%RUNNING_PIDS = ();
}
# Clear previous statistics
&init_stats_vars();
# Load all data gathered by all the different processes
foreach my $f (@tempfiles) {
next if (!-e "$f->[1]" || -z "$f->[1]");
my $fht = new IO::File;
$fht->open("< $f->[1]") or die "FATAL: can't open temp file $f->[1], $!\n";
&load_stats($fht);
$fht->close();
}
} else {
# Multiprocessing disabled, parse log files one by one
foreach my $logfile ( @given_log_files ) {
last if (&process_file($logfile, '', $saved_last_line{current_pos}));
}
}
# Get last line parsed from all process
if ($last_parsed) {
if (open(IN, "$tmp_last_parsed") ) {
while (my $line = <IN>) {
chomp($line);
my ($d, $p, $l) = split(/\t/, $line, 3);
if (!$last_line{datetime} || ($d gt $last_line{datetime})) {
$last_line{datetime} = $d;
if ($p =~ /^\d+$/) {
$last_line{orig} = $l;
$last_line{current_pos} = $p;
} else {
$last_line{orig} = $p . "\t" . $l;
}
}
}
close(IN);
}
unlink("$tmp_last_parsed");
}
# Save last line parsed
if ($last_parsed && $last_line{datetime} && $last_line{orig}) {
if (open(OUT, ">$last_parsed")) {
$last_line{current_pos} ||= 0;
print OUT "$last_line{datetime}\t$last_line{current_pos}\t$last_line{orig}\n";
close(OUT);
} else {
&logmsg('ERROR', "can't save last parsed line into $last_parsed, $!");
}
}
exit 2 if ($terminate);
my $t1 = Benchmark->new;
my $td = timediff($t1, $t0);
&logmsg('DEBUG', "the log statistics gathering took:" . timestr($td));
# Global output filehandle
my $fh = undef;
if (!$incremental && ($#given_log_files >= 0) ) {
&logmsg('LOG', "Ok, generating $extension report...");
if ($extension ne 'tsung') {
$fh = new IO::File ">$outfile";
if (not defined $fh) {
die "FATAL: can't write to $outfile, $!\n";
}
if (($extension eq 'text') || ($extension eq 'txt')) {
if ($error_only) {
&dump_error_as_text();
} else {
&dump_as_text();
}
} elsif ($extension eq 'json') {
if ($error_only) {
&dump_error_as_json();
} else {
&dump_as_json();
}
} elsif ($extension eq 'binary') {
&dump_as_binary($fh);
} else {
# Create instance to prettify SQL query
if (!$noprettify) {
$sql_prettified = SQL::Beautify->new(keywords => \@beautify_pg_keywords);
}
&dump_as_html('.');
}
$fh->close;
} else {
# Open filehandle
$fh = new IO::File ">>$outfile";
if (not defined $fh) {
die "FATAL: can't write to $outfile, $!\n";
}
print $fh "\n\n";
$fh->close();
}
} elsif (!$incremental || !$noreport) {
# Build a report per day
my %weeks_directories = ();
my @build_directories = ();
if (open(IN, "$last_parsed.tmp")) {
while (my $l = <IN>) {
chomp($l);
push(@build_directories, $l) if (!grep(/^$l$/, @build_directories));
}
close(IN);
unlink("$last_parsed.tmp");
} else {
&logmsg('WARNING', "can't read file $last_parsed.tmp, $!");
&logmsg('HINT', "maybe there's no new entries in your log since last run.");
}
foreach $incr_date (sort @build_directories) {
$last_incr_date = $incr_date;
# Set the path to binary files
my $bpath = $incr_date;
$bpath =~ s/\-/\//g;
$incr_date =~ /^(\d+)-(\d+)\-(\d+)$/;
# Get the week number following the date
my $wn = &get_week_number($1, $2, $3);
$weeks_directories{$wn} = "$1-$2" if (!exists $weeks_directories{$wn});
# First clear previous stored statistics
&init_stats_vars();
# Load all data gathered by all the different processes
unless(opendir(DIR, "$outdir/$bpath")) {
die "Error: can't opendir $outdir/$bpath: $!";
}
my @mfiles = grep { !/^\./ && ($_ =~ /\.bin$/) } readdir(DIR);
closedir DIR;
foreach my $f (@mfiles) {
my $fht = new IO::File;
$fht->open("< $outdir/$bpath/$f") or die "FATAL: can't open file $outdir/$bpath/$f, $!\n";
&load_stats($fht);
$fht->close();
}
&logmsg('LOG', "Ok, generating HTML daily report into $outdir/$bpath/...");
$fh = new IO::File ">$outdir/$bpath/$outfile";
if (not defined $fh) {
die "FATAL: can't write to $outdir/$bpath/$outfile, $!\n";
}
# Create instance to prettify SQL query
if (!$noprettify) {
$sql_prettified = SQL::Beautify->new(keywords => \@beautify_pg_keywords);
}
&dump_as_html('../../..');
$fh->close;
}
# Build a report per week
foreach my $wn (sort { $a <=> $b } keys %weeks_directories) {
&init_stats_vars();
# Get all days of the current week
my @wdays = &get_wdays_per_month($wn - 1, $weeks_directories{$wn});
my $wdir = '';
# Load data per day
foreach $incr_date (@wdays) {
my $bpath = $incr_date;
$bpath =~ s/\-/\//g;
$incr_date =~ /^(\d+)\-(\d+)\-(\d+)$/;
$wdir = "$1/week-$wn";
# Load all data gathered by all the different processes
if (-e "$outdir/$bpath") {
unless(opendir(DIR, "$outdir/$bpath")) {
die "Error: can't opendir $outdir/$bpath: $!";
}
my @mfiles = grep { !/^\./ && ($_ =~ /\.bin$/) } readdir(DIR);
closedir DIR;
foreach my $f (@mfiles) {
my $fht = new IO::File;
$fht->open("< $outdir/$bpath/$f") or die "FATAL: can't open file $outdir/$bpath/$f, $!\n";
&load_stats($fht);
$fht->close();
}
}
}
&logmsg('LOG', "Ok, generating HTML weekly report into $outdir/$wdir/...");
if (!-d "$outdir/$wdir") {
mkdir("$outdir/$wdir");
}
$fh = new IO::File ">$outdir/$wdir/$outfile";
if (not defined $fh) {
die "FATAL: can't write to $outdir/$wdir/$outfile, $!\n";
}
# Create instance to prettify SQL query
if (!$noprettify) {
$sql_prettified = SQL::Beautify->new(keywords => \@beautify_pg_keywords);
}
&dump_as_html('../..');
$fh->close;
}
&logmsg('LOG', "Ok, generating global index to access incremental reports...");
$fh = new IO::File ">$outdir/index.html";
if (not defined $fh) {
die "FATAL: can't write to $outdir/index.html, $!\n";
}
my $date = localtime(time);
my @tmpjscode = @jscode;
map { s/EDIT_URI/\./; } @tmpjscode;
my $local_title = 'Global Index on incremental reports';
if ($report_title) {
$local_title = 'Global Index - ' . $report_title;
}
print $fh qq{
pgBadger :: $local_title
@tmpjscode
};
# get year directories
unless(opendir(DIR, "$outdir")) {
die "Error: can't opendir $outdir: $!";
}
my @dyears = grep { !/^\./ && /^\d{4}$/ } readdir(DIR);
closedir DIR;
foreach my $y (sort { $b <=> $a } @dyears) {
print $fh qq{
Year $y
};
# foreach year directory look for week directories
unless(opendir(DIR, "$outdir/$y")) {
die "Error: can't opendir $outdir/$y: $!";
}
my @ymonths = grep { /^\d{2}$/ } readdir(DIR);
closedir DIR;
my $i = 1;
foreach my $m (sort {$a <=> $b } @ymonths) {
print $fh "
\n";
}
$query = &highlight_code($normalyzed_info{$k}{samples}{$d}{query});
my $md5 = '';
$md5 = 'md5: ' . md5_hex($normalyzed_info{$k}{samples}{$d}{query}) if ($enable_checksum);
print $fh qq{
$query
$md5
$details
$explain
};
$idx++;
}
print $fh qq{
};
$rank++;
}
if (!$found) {
print $fh qq{
$NODATA
};
}
print $fh qq{
};
}
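# dump_as_html() writes one complete HTML report to the already opened global
# $fh. The $uri argument is the relative path from the generated report back to
# the top of the output tree ('../../..' for daily reports, '../..' for weekly
# ones, as passed by the callers above).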
sub dump_as_html
{
my $uri = shift;
# Dump the html header
&html_header($uri);
if (!$error_only) {
# Overall statistics
print $fh qq{
};
&print_overall_statistics();
# Set graphs limits
$overall_stat{'first_log_ts'} =~ /^(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/;
$t_min = timegm_nocheck(0, $5, $4, $3, $2 - 1, $1) * 1000;
$t_min -= ($avg_minutes * 60000);
$overall_stat{'last_log_ts'} =~ /^(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/;
$t_max = timegm_nocheck(59, $5, $4, $3, $2 - 1, $1) * 1000;
$t_max += ($avg_minutes * 60000);
if (!$disable_hourly) {
# Build graphs based on hourly stat
&compute_query_graphs();
# Show global SQL traffic
&print_sql_traffic();
# Show hourly statistics
&print_general_activity();
}
if (!$disable_connection) {
print $fh qq{
Connections
};
# Draw connections information
&print_established_connection() if (!$disable_hourly);
# Show per database/user connections
&print_database_connection() if (exists $connection_info{database});
# Show per user connections
&print_user_connection() if (exists $connection_info{user});
# Show per client ip connections
&print_host_connection() if (exists $connection_info{host});
}
# Show session per database statistics
if (!$disable_session) {
print $fh qq{
Sessions
};
# Show number of simultaneous sessions
&print_simultaneous_session();
# Show histogram for session times
&print_histogram_session_times();
# Show per database sessions
&print_database_session();
# Show per user sessions
&print_user_session();
# Show per host sessions
&print_host_session();
# Show per application sessions
&print_app_session();
}
# Display checkpoint and temporary files report
if (!$disable_checkpoint) {
print $fh qq{
};
&print_checkpoint();
}
if (!$disable_temporary) {
print $fh qq{
};
# Show temporary files detailed information
&print_temporary_file();
# Show information about queries generating temporary files
&print_tempfile_report();
}
if (!$disable_autovacuum) {
print $fh qq{
};
# Show detailed vacuum/analyse information
&print_vacuum();
}
if (!$disable_lock) {
print $fh qq{
};
# Lock stats per type
&print_lock_type();
# Show lock wait detailed information
&print_lock_queries_report();
}
if (!$disable_query) {
print $fh qq{
};
# INSERT/DELETE/UPDATE/SELECT repartition
if (!$disable_type) {
&print_query_type();
# Show requests per database
&print_query_per_database();
# Show requests per user
&print_query_per_user();
# Show requests per host
&print_query_per_host();
# Show requests per application
&print_query_per_application();
;
# Show cancelled queries detailed information
&print_cancelled_queries();
# Show information about cancelled queries
&print_cancelled_report();
}
print $fh qq{
};
# Show histogram for query times
&print_histogram_query_times();
# Show top information
&print_slowest_individual_queries();
# Show queries that took up the most time
&print_time_consuming();
# Show most frequent queries
&print_most_frequent();
# Print normalized slowest queries
&print_slowest_queries();
}
}
# Show errors report
if (!$disable_error) {
if (!$error_only) {
print $fh qq{
};
} else {
print $fh qq{
};
}
# Show log level distribution
&print_log_level();
# Show Most Frequent Errors/Events
&show_error_as_html();
}
# Dump the html footer
&html_footer();
}
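# url_escape() percent-encodes every character outside [a-zA-Z0-9_.~-] using
# uppercase hex digits; on EBCDIC platforms the byte value is first translated
# through the @E2A table. Illustrative example (not from the original source):
# url_escape('a b/c') returns 'a%20b%2Fc'.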
sub url_escape
{
my $toencode = shift;
return if (!$toencode);
utf8::encode($toencode) if (($] >= 5.008) && utf8::is_utf8($toencode));
if (EBCDIC) {
$toencode =~ s/([^a-zA-Z0-9_.~-])/uc sprintf("%%%02x",$E2A[ord($1)])/eg;
} else {
$toencode =~ s/([^a-zA-Z0-9_.~-])/uc sprintf("%%%02x",ord($1))/eg;
}
return $toencode;
}
sub escape_html
{
$_[0] =~ s/<([\/a-zA-Z][\s\t\>]*)/&lt;$1/sg;
return $_[0];
}
sub print_log_level
{
my %infos = ();
# Some messages have seen their log level change during log parsing.
# Set the real log level count back
foreach my $k (sort {$error_info{$b}{count} <=> $error_info{$a}{count}} keys %error_info) {
next if (!$error_info{$k}{count});
if ($error_info{$k}{count} > 1) {
for (my $i = 0 ; $i <= $#{$error_info{$k}{date}} ; $i++) {
if ( ($error_info{$k}{error}[$i] =~ s/ERROR: (parameter "[^"]+" changed to)/LOG: $1/)
|| ($error_info{$k}{error}[$i] =~ s/ERROR: (database system was shut down)/LOG: $1/)
|| ($error_info{$k}{error}[$i] =~ s/ERROR: (database system was interrupted while in recovery)/LOG: $1/)
|| ($error_info{$k}{error}[$i] =~ s/ERROR: (recovery has paused)/LOG: $1/))
{
$logs_type{ERROR}--;
$logs_type{LOG}++;
}
}
} else {
if ( ($error_info{$k}{error}[0] =~ s/ERROR: (parameter "[^"]+" changed to)/LOG: $1/)
|| ($error_info{$k}{error}[0] =~ s/ERROR: (database system was shut down)/LOG: $1/)
|| ($error_info{$k}{error}[0] =~ s/ERROR: (database system was interrupted while in recovery)/LOG: $1/)
|| ($error_info{$k}{error}[0] =~ s/ERROR: (recovery has paused)/LOG: $1/))
{
$logs_type{ERROR}--;
$logs_type{LOG}++;
}
}
}
# Show log types
my $total_logs = 0;
foreach my $d (sort keys %logs_type) {
$total_logs += $logs_type{$d};
}
my $logtype_info = '';
foreach my $d (sort keys %logs_type) {
next if (!$logs_type{$d});
$logtype_info .= "
};
}
$legend ||= 'Queries';
$legend = "{ data: d1, color: \"#6e9dc9\", mouse:{track:true}, bars: {show: true, shadowSize: 0}, },";
my $dateTracker_dataopts = '[ d1 ]';
my $data1 = "var d1 = [";
my $max = 0;
my $xmax = @$range + 1;
my $dateTracker_lblopts = "[ '0', ";
for (my $i = 1; $i <= $#{$range}; $i++) {
my $k = "$range->[$i-1]-$range->[$i]ms";
$max = $data{$k} if ($data{$k} > $max);
$data1 .= " [$i, $data{$k}],";
$data{"$histogram_query_time[$i-1]-$histogram_query_time[$i]ms"} = ($overall_stat{histogram}{query_time}{$histogram_query_time[$i-1]} || 0);
$dateTracker_lblopts .= "'" . &convert_time($range->[$i-1]) . '-' . &convert_time($range->[$i]) . "',";
}
$dateTracker_lblopts .= "'> " . &convert_time($range->[-1]) . "'";
$dateTracker_lblopts .= "]";
$data1 =~ s/,$//;
$data1 .= "];";
if ($max > 3) {
$max += int(($max*25)/100);
} else {
$max *= 2;
}
return <
EOF
}
sub build_log_line_prefix_regex
{
my %regex_map = (
'%a' => [('t_appname', '(.*)')], # application name
'%u' => [('t_dbuser', '([0-9a-zA-Z\_\[\]\-]*)')], # user name
'%d' => [('t_dbname', '([0-9a-zA-Z\_\[\]\-]*)')], # database name
'%r' => [('t_hostport', '([a-zA-Z0-9\-\.]+|\[local\]|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[0-9a-fA-F:]+)?[\(\d\)]*')], # remote host and port
'%h' => [('t_client', '([a-zA-Z0-9\-\.]+|\[local\]|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[0-9a-fA-F:]+)?')], # remote host
'%p' => [('t_pid', '(\d+)')], # process ID
'%t' => [('t_timestamp', '(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})(?: [A-Z\+\-\d]{3,6})?')], # timestamp without milliseconds
'%m' => [('t_mtimestamp', '(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\.\d+(?: [A-Z\+\-\d]{3,6})?')], # timestamp with milliseconds
'%l' => [('t_session_line', '(\d+)')], # session line number
'%s' => [('t_session_timestamp', '(\d{4}-\d{2}-\d{2} \d{2}):\d{2}:\d{2}(?: [A-Z\d]{3,6})?')], # session start timestamp
'%c' => [('t_session_id', '([0-9a-f\.]*)')], # session ID
'%v' => [('t_virtual_xid', '([0-9a-f\.\/]*)')], # virtual transaction ID
'%x' => [('t_xid', '([0-9a-f\.\/]*)')], # transaction ID
'%i' => [('t_command', '([0-9a-zA-Z\.\-\_]*)')], # command tag
'%e' => [('t_sqlstate', '([0-9a-zA-Z]+)')], # SQL state
);
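# Illustrative example (not part of the original source): with
# log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d,app=%a' the loop below
# returns @param_list = (t_timestamp, t_pid, t_session_line, t_dbuser,
# t_dbname, t_appname), in the same order as the capture groups of the
# generated regular expression.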
my @param_list = ();
$log_line_prefix =~ s/([\[\]\|\(\)\{\}])/\\$1/g;
$log_line_prefix =~ s/\%l([^\d])\d+/\%l$1\\d\+/;
$log_line_prefix =~ s/\%q//;
while ($log_line_prefix =~ s/(\%[audrhptmlscvxie])/$regex_map{"$1"}->[1]/) {
push(@param_list, $regex_map{"$1"}->[0]);
}
# replace %% by a single %
$log_line_prefix =~ s/\%\%/\%/;
return @param_list;
}
# Inclusion of Perl package SQL::Beautify
# Copyright (C) 2009 by Jonas Kramer
# Published under the terms of the Artistic License 2.0.
{
package SQL::Beautify;
use strict;
use warnings;
our $VERSION = 0.04;
use Carp;
# Keywords from SQL-92, SQL-99 and SQL-2003.
use constant KEYWORDS => qw(
ABSOLUTE ACTION ADD AFTER ALL ALLOCATE ALTER AND ANY ARE ARRAY AS ASC
ASENSITIVE ASSERTION ASYMMETRIC AT ATOMIC AUTHORIZATION AVG BEFORE BEGIN
BETWEEN BIGINT BINARY BIT BIT_LENGTH BLOB BOOLEAN BOTH BREADTH BY CALL
CALLED CASCADE CASCADED CASE CAST CATALOG CHAR CHARACTER CHARACTER_LENGTH
CHAR_LENGTH CHECK CLOB CLOSE COALESCE COLLATE COLLATION COLUMN COMMIT
CONDITION CONNECT CONNECTION CONSTRAINT CONSTRAINTS CONSTRUCTOR CONTAINS
CONTINUE CONVERT CORRESPONDING COUNT CREATE CROSS CUBE CURRENT CURRENT_DATE
CURRENT_DEFAULT_TRANSFORM_GROUP CURRENT_PATH CURRENT_ROLE CURRENT_TIME
CURRENT_TIMESTAMP CURRENT_TRANSFORM_GROUP_FOR_TYPE CURRENT_USER CURSOR
CYCLE DATA DATE DAY DEALLOCATE DEC DECIMAL DECLARE DEFAULT DEFERRABLE
DEFERRED DELETE DEPTH DEREF DESC DESCRIBE DESCRIPTOR DETERMINISTIC
DIAGNOSTICS DISCONNECT DISTINCT DO DOMAIN DOUBLE DROP DYNAMIC EACH ELEMENT
ELSE ELSEIF END EPOCH EQUALS ESCAPE EXCEPT EXCEPTION EXEC EXECUTE EXISTS
EXIT EXTERNAL EXTRACT FALSE FETCH FILTER FIRST FLOAT FOR FOREIGN FOUND FREE
FROM FULL FUNCTION GENERAL GET GLOBAL GO GOTO GRANT GROUP GROUPING HANDLER
HAVING HOLD HOUR IDENTITY IF IMMEDIATE IN INDICATOR INITIALLY INNER INOUT
INPUT INSENSITIVE INSERT INT INTEGER INTERSECT INTERVAL INTO IS ISOLATION
ITERATE JOIN KEY LANGUAGE LARGE LAST LATERAL LEADING LEAVE LEFT LEVEL LIKE
LIMIT LOCAL LOCALTIME LOCALTIMESTAMP LOCATOR LOOP LOWER MAP MATCH MAX
MEMBER MERGE METHOD MIN MINUTE MODIFIES MODULE MONTH MULTISET NAMES
NATIONAL NATURAL NCHAR NCLOB NEW NEXT NO NONE NOT NULL NULLIF NUMERIC
OBJECT OCTET_LENGTH OF OLD ON ONLY OPEN OPTION OR ORDER ORDINALITY OUT
OUTER OUTPUT OVER OVERLAPS PAD PARAMETER PARTIAL PARTITION PATH POSITION
PRECISION PREPARE PRESERVE PRIMARY PRIOR PRIVILEGES PROCEDURE PUBLIC RANGE
READ READS REAL RECURSIVE REF REFERENCES REFERENCING RELATIVE RELEASE
REPEAT RESIGNAL RESTRICT RESULT RETURN RETURNS REVOKE RIGHT ROLE ROLLBACK
ROLLUP ROUTINE ROW ROWS SAVEPOINT SCHEMA SCOPE SCROLL SEARCH SECOND SECTION
SELECT SENSITIVE SESSION SESSION_USER SET SETS SIGNAL SIMILAR SIZE SMALLINT
SOME SPACE SPECIFIC SPECIFICTYPE SQL SQLCODE SQLERROR SQLEXCEPTION SQLSTATE
SQLWARNING START STATE STATIC SUBMULTISET SUBSTRING SUM SYMMETRIC SYSTEM
SYSTEM_USER TABLE TABLESAMPLE TEMPORARY TEXT THEN TIME TIMESTAMP
TIMEZONE_HOUR TIMEZONE_MINUTE TINYINT TO TRAILING TRANSACTION TRANSLATE
TRANSLATION TREAT TRIGGER TRIM TRUE UNDER UNDO UNION UNIQUE UNKNOWN UNNEST
UNTIL UPDATE UPPER USAGE USER USING VALUE VALUES VARCHAR VARYING VIEW WHEN
WHENEVER WHERE WHILE WINDOW WITH WITHIN WITHOUT WORK WRITE YEAR ZONE
);
sub tokenize_sql
{
my ($query, $remove_white_tokens) = @_;
my $re = qr{
(
(?:--)[\ \t\S]* # single line comments
|
(?:\#|\@\-\@|\@\@|\#\#|<\->|\&<|\&>|<<\||\|>>|\&<\||\|\&>|<\^|>\^|\?\#|\?\-|\?\||\?\-\||\?\|\||\@>|<\@|\~=)
# Geometric Operators
|
(?:\->>|\->|\#>|\?\&|\?) # Json Operators
|
(?:<>|<=>|>=|<=|==|=|!=|!|<<|>>|<|>|\|\||\||&&|&|-|\+|\*(?!/)|/(?!\*)|\%|~|\^|\?)
# operators and tests
|
[\[\]\(\),;.] # punctuation (parenthesis, comma)
|
\'\'(?!\') # empty single quoted string
|
\"\"(?!\"") # empty double quoted string
|
"(?>(?:(?>[^"\\]+)|""|\\.)*)+"
# anything inside double quotes, ungreedy
|
`(?>(?:(?>[^`\\]+)|``|\\.)*)+`
# anything inside backticks quotes, ungreedy
|
'(?>(?:(?>[^'\\]+)|''|\\.)*)+'
# anything inside single quotes, ungreedy.
|
/\*[\ \t\r\n\S]*?\*/ # C style comments
|
(?:[\w:@]+(?:\.(?:\w+|\*)?)*)
# words, standard named placeholders, db.table.*, db.*
|
(?: \$_\$ | \$\d+ | \${1,2})
# dollar expressions - eg $_$ $3 $$
|
\n # newline
|
[\t\ ]+ # any kind of white spaces
)
}smx;
my @query = ();
@query = $query =~ m{$re}smxg;
if ($remove_white_tokens) {
@query = grep(!/^[\s\n\r]*$/, @query);
}
return wantarray ? @query : \@query;
}
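# Illustrative example (not part of the original source): with the second
# argument set to drop whitespace tokens,
#   tokenize_sql(q{SELECT a, b FROM t WHERE x = 1}, 1)
# returns ('SELECT', 'a', ',', 'b', 'FROM', 't', 'WHERE', 'x', '=', '1').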
sub new
{
my ($class, %options) = @_;
my $self = bless {%options}, $class;
# Set some defaults.
$self->{query} = '' unless defined($self->{query});
$self->{spaces} = 4 unless defined($self->{spaces});
$self->{space} = ' ' unless defined($self->{space});
$self->{break} = "\n" unless defined($self->{break});
$self->{wrap} = {} unless defined($self->{wrap});
$self->{keywords} = [] unless defined($self->{keywords});
$self->{rules} = {} unless defined($self->{rules});
$self->{uc_keywords} = 0 unless defined $self->{uc_keywords};
push(@{$self->{keywords}}, KEYWORDS);
# Initialize internal stuff.
$self->{_level} = 0;
return $self;
}
# Add more SQL.
sub add
{
my ($self, $addendum) = @_;
$addendum =~ s/^\s*/ /;
$self->{query} .= $addendum;
}
# Set SQL to beautify.
sub query
{
my ($self, $query) = @_;
$self->{query} = $query if (defined($query));
return $self->{query};
}
# Beautify SQL.
sub beautify
{
my ($self) = @_;
$self->{_output} = '';
$self->{_level_stack} = [];
$self->{_new_line} = 1;
my $last = '';
$self->{_tokens} = [tokenize_sql($self->query, 1)];
while (defined(my $token = $self->_token)) {
my $rule = $self->_get_rule($token);
# Allow custom rules to override defaults.
if ($rule) {
$self->_process_rule($rule, $token);
}
elsif ($token eq '(') {
$self->_add_token($token);
$self->_new_line;
push @{$self->{_level_stack}}, $self->{_level};
$self->_over unless $last and uc($last) eq 'WHERE';
}
elsif ($token eq ')') {
# $self->_new_line;
$self->{_level} = pop(@{$self->{_level_stack}}) || 0;
$self->_add_token($token);
$self->_new_line if ($self->_next_token
and $self->_next_token !~ /^AS$/i
and $self->_next_token ne ')'
and $self->_next_token !~ /::/
and $self->_next_token ne ';'
and $self->_next_token ne ','
);
}
elsif ($token eq ',') {
$self->_add_token($token);
$self->_new_line;
}
elsif ($token eq ';') {
$self->_add_token($token);
$self->_new_line;
# End of statement; remove all indentation.
@{$self->{_level_stack}} = ();
$self->{_level} = 0;
}
elsif ($token =~ /^(?:SELECT|FROM|WHERE|HAVING|BEGIN|SET)$/i) {
$self->_back if ($last and $last ne '(' and $last ne 'FOR');
$self->_new_line;
$self->_add_token($token);
$self->_new_line if ((($token ne 'SET') || $last) and $self->_next_token and $self->_next_token ne '(' and $self->_next_token ne ';');
$self->_over;
}
elsif ($token =~ /^(?:GROUP|ORDER|LIMIT)$/i) {
$self->_back;
$self->_new_line;
$self->_add_token($token);
}
elsif ($token =~ /^(?:BY)$/i) {
$self->_add_token($token);
$self->_new_line;
$self->_over;
}
elsif ($token =~ /^(?:CASE)$/i) {
$self->_add_token($token);
$self->_over;
}
elsif ($token =~ /^(?:WHEN)$/i) {
$self->_new_line;
$self->_add_token($token);
}
elsif ($token =~ /^(?:ELSE)$/i) {
$self->_new_line;
$self->_add_token($token);
}
elsif ($token =~ /^(?:END)$/i) {
$self->_back;
$self->_new_line;
$self->_add_token($token);
}
elsif ($token =~ /^(?:UNION|INTERSECT|EXCEPT)$/i) {
$self->_back unless $last and $last eq '(';
$self->_new_line;
$self->_add_token($token);
$self->_new_line if ($self->_next_token and $self->_next_token ne '(');
$self->_over;
}
elsif ($token =~ /^(?:LEFT|RIGHT|INNER|OUTER|CROSS)$/i) {
$self->_back;
$self->_new_line;
$self->_add_token($token);
$self->_over;
}
elsif ($token =~ /^(?:JOIN)$/i) {
if ($last and $last !~ /^(?:LEFT|RIGHT|INNER|OUTER|CROSS)$/) {
$self->_new_line;
}
$self->_add_token($token);
}
elsif ($token =~ /^(?:AND|OR)$/i) {
$self->_new_line;
$self->_add_token($token);
# $self->_new_line;
}
elsif ($token =~ /^--/) {
if (!$self->{no_comments}) {
$self->_add_token($token);
$self->_new_line;
}
}
elsif ($token =~ /^\/\*.*\*\/$/s) {
if (!$self->{no_comments}) {
$token =~ s/\n[\s\t]+\*/\n\*/gs;
$self->_new_line;
$self->_add_token($token);
$self->_new_line;
}
}
else {
$self->_add_token($token, $last);
}
$last = $token;
}
$self->_new_line;
$self->{_output};
}
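# Minimal usage sketch (illustrative, not taken verbatim from the callers):
#   my $beautifier = SQL::Beautify->new(uc_keywords => 0);
#   $beautifier->query('select a from t where x = 1');
#   my $pretty = $beautifier->beautify;   # returns the re-indented statement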
# Add a token to the beautified string.
sub _add_token
{
my ($self, $token, $last_token) = @_;
if ($self->{wrap}) {
my $wrap;
if ($self->_is_keyword($token)) {
$wrap = $self->{wrap}->{keywords};
} elsif ($self->_is_constant($token)) {
$wrap = $self->{wrap}->{constants};
}
if ($wrap) {
$token = $wrap->[0] . $token . $wrap->[1];
}
}
my $last_is_dot = defined($last_token) && $last_token eq '.';
if (!$self->_is_punctuation($token) and !$last_is_dot) {
$self->{_output} .= $self->_indent;
}
# uppercase keywords
$token = uc $token
if $self->_is_keyword($token)
and $self->{uc_keywords};
$self->{_output} .= $token;
# This can't be the beginning of a new line anymore.
$self->{_new_line} = 0;
}
# Increase the indentation level.
sub _over
{
my ($self) = @_;
++$self->{_level};
}
# Decrease the indentation level.
sub _back
{
my ($self) = @_;
--$self->{_level} if ($self->{_level} > 0);
}
# Return a string of spaces according to the current indentation level and the
# spaces setting for indenting.
sub _indent
{
my ($self) = @_;
if ($self->{_new_line}) {
return $self->{space} x ($self->{spaces} * $self->{_level});
} else {
return $self->{space};
}
}
# Add a line break, but make sure there are no empty lines.
sub _new_line
{
my ($self) = @_;
$self->{_output} .= $self->{break} unless ($self->{_new_line});
$self->{_new_line} = 1;
}
# Have a look at the token that's coming up next.
sub _next_token
{
my ($self) = @_;
return @{$self->{_tokens}} ? $self->{_tokens}->[0] : undef;
}
# Get the next token, removing it from the list of remaining tokens.
sub _token
{
my ($self) = @_;
return shift @{$self->{_tokens}};
}
# Check if a token is a known SQL keyword.
sub _is_keyword
{
my ($self, $token) = @_;
return ~~ grep {$_ eq uc($token)} @{$self->{keywords}};
}
# Add new keywords to highlight.
sub add_keywords
{
my $self = shift;
for my $keyword (@_) {
push @{$self->{keywords}}, ref($keyword) ? @{$keyword} : $keyword;
}
}
# Add new rules.
sub add_rule
{
my ($self, $format, $token) = @_;
my $rules = $self->{rules} ||= {};
my $group = $rules->{$format} ||= [];
push @{$group}, ref($token) ? @{$token} : $token;
}
# Find custom rule for a token.
sub _get_rule
{
my ($self, $token) = @_;
values %{$self->{rules}}; # Reset iterator.
while (my ($rule, $list) = each %{$self->{rules}}) {
return $rule if (grep {uc($token) eq uc($_)} @$list);
}
return undef;
}
sub _process_rule
{
my ($self, $rule, $token) = @_;
my $format = {
break => sub {$self->_new_line},
over => sub {$self->_over},
back => sub {$self->_back},
token => sub {$self->_add_token($token)},
push => sub {push @{$self->{_level_stack}}, $self->{_level}},
pop => sub {$self->{_level} = pop(@{$self->{_level_stack}}) || 0},
reset => sub {$self->{_level} = 0; @{$self->{_level_stack}} = ();},
};
for (split /-/, lc $rule) {
&{$format->{$_}} if ($format->{$_});
}
}
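# Rule strings handled above are dash-separated action lists applied when one
# of the registered tokens is seen. Illustrative example (no such rule is
# registered by default):
#   $beautifier->add_rule('break-token-over', 'RETURNING');
# would start a new line, emit RETURNING, then increase the indentation level.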
# Check if a token is a constant.
sub _is_constant
{
my ($self, $token) = @_;
return ($token =~ /^\d+$/ or $token =~ /^(['"`]).*\1$/);
}
# Check if a token is punctuation.
sub _is_punctuation
{
my ($self, $token) = @_;
return ($token =~ /^[,;.]$/);
}
}
sub get_log_file
{
my $logf = shift;
my $lfile = undef;
# get file size
my $totalsize = 0;
if (!$remote_host) {
$totalsize = (stat("$logf"))[7] || 0 if ($logf ne '-');
} elsif ($logf !~ /\.(gz|bz2|zip|xz)$/i) {
&logmsg('DEBUG', "Looking for file size using command: $ssh_command \"ls -l $logf\" | awk '{print \$5}'");
$totalsize = `$ssh_command "ls -l $logf" | awk '{print \$5}'`;
chomp($totalsize);
if ($totalsize eq '') {
die "FATAL: can't get size of remote file, please check what's going wrong with command: $ssh_command \"ls -l $logf\" | awk '{print \$5}'\n";
}
&logmsg('DEBUG', "Remote file size: $totalsize");
if (!$totalsize) {
return $totalsize;
}
}
my $iscompressed = 1;
# Open a file handle
if ($logf !~ /\.(gz|bz2|zip|xz)$/i) {
if (!$remote_host) {
open($lfile, $logf) || die "FATAL: cannot read log file $logf. $!\n";
} else {
# Open a pipe to zcat program for compressed log
open($lfile,"$ssh_command \"cat $logf\" |") || die "FATAL: cannot read from pipe to $ssh_command \"cat $logf\". $!\n";
}
$totalsize = 0 if ($logf eq '-');
$iscompressed = 0;
} else {
my $uncompress = $zcat;
if (($logf =~ /\.bz2/i) && ($zcat =~ /^$zcat_cmd$/)) {
$uncompress = $bzcat;
} elsif (($logf =~ /\.zip/i) && ($zcat =~ /^$zcat_cmd$/)) {
$uncompress = $ucat;
}
elsif (($logf =~ /\.xz/i) && ($zcat =~ /^$zcat_cmd$/)) {
$uncompress = $xzcat;
}
if (!$remote_host) {
&logmsg('DEBUG', "Compressed log file, will use command: $uncompress \"$logf\"");
# Open a pipe to zcat program for compressed log
open($lfile,"$uncompress \"$logf\" |") || die "FATAL: cannot read from pipe to $uncompress \"$logf\". $!\n";
} else {
&logmsg('DEBUG', "Compressed log file, will use command: $ssh_command \"$uncompress $logf\"");
# Open a pipe to zcat program for compressed log
open($lfile,"$ssh_command \"$uncompress $logf\" |") || die "FATAL: cannot read from pipe to $ssh_command \"$uncompress $logf\". $!\n";
}
# Real size of the file is unknown, try to find it
# bz2 does not report real size
$totalsize = 0;
if ($logf =~ /\.(gz|zip|xz)$/i) {
my $cmd_file_size = $gzip_uncompress_size;
if ($logf =~ /\.zip$/i) {
$cmd_file_size = $zip_uncompress_size;
} elsif ($logf =~ /\.xz$/i) {
$cmd_file_size = $xz_uncompress_size;
}
$cmd_file_size =~ s/\%f/$logf/g;
if (!$remote_host) {
&logmsg('DEBUG', "Looking for file size using command: $cmd_file_size");
$totalsize = `$cmd_file_size`;
} else {
&logmsg('DEBUG', "Looking for remote file size using command: $ssh_command $cmd_file_size");
$totalsize = `$ssh_command $cmd_file_size`;
}
chomp($totalsize);
}
$queue_size = 0;
}
# In list context returns the filehandle and the size of the file
if (wantarray()) {
return ($lfile, $totalsize, $iscompressed);
}
# In scalar context return size only
close($lfile);
return $totalsize;
}
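# split_logfile() computes the byte offsets used to split a plain-text log
# between the parallel workers (--jobs). It returns a list of boundaries
# starting at 0 (or at the last parsed position when resuming with
# --last-parsed) and ending at the file size. CSV files and files whose size
# cannot be determined return (0, -1); compressed and small (<= 16MB) files
# reset $queue_size to 0 so a single worker handles them.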
sub split_logfile
{
my $logf = shift;
# CSV file can't be parsed using multiprocessing
return (0, -1) if ( $format eq 'csv' );
# get file size
my $totalsize = (stat("$logf"))[7] || 0;
# Real size of the file is unknown, try to find it
# bz2 does not report real size
if ($totalsize <= 16777216) { #16MB
#If the file is very small, many jobs actually make the parsing take longer
#What is an acceptable file size????
$queue_size = 0;
} elsif ($logf =~ /\.(gz|zip|xz)$/i) {
$totalsize = 0;
my $cmd_file_size = $gzip_uncompress_size;
if ($logf =~ /\.zip$/i) {
$cmd_file_size = $zip_uncompress_size;
} elsif ($logf =~ /\.xz$/i) {
$cmd_file_size = $xz_uncompress_size;
}
$cmd_file_size =~ s/\%f/$logf/g;
$totalsize = `$cmd_file_size`;
chomp($totalsize);
$queue_size = 0;
} elsif ($logf =~ /\.bz2$/i) {
$totalsize = 0;
$queue_size = 0;
}
return (0, -1) if (!$totalsize);
my @chunks = (0);
my $i = 1;
if ($last_parsed && $saved_last_line{current_pos} && ($saved_last_line{current_pos} < $totalsize)) {
$chunks[0] = $saved_last_line{current_pos};
}
my ($lfile, $null) = &get_log_file($logf); # Get file handle to the file
while ($i < $queue_size) {
my $pos = int(($totalsize/$queue_size) * $i);
if ($pos > $chunks[0]) {
$lfile->seek($pos, 0);
#Move the offset to the BEGINNING of each line, because the logic in process_file requires so
$pos= $pos + length(<$lfile>) - 1;
push(@chunks, $pos) if ($pos < $totalsize);
}
last if ($pos >= $totalsize);
$i++;
}
$lfile->close();
push(@chunks, $totalsize);
return @chunks;
}
# Return the week number of the year for a given date
sub get_week_number
{
my ($year, $month, $day) = @_;
# %U The week number of the current year as a decimal number, range 00 to 53, starting with the first
# Sunday as the first day of week 01.
# %V The ISO 8601 week number (see NOTES) of the current year as a decimal number, range 01 to 53,
# where week 1 is the first week that has at least 4 days in the new year.
# %W The week number of the current year as a decimal number, range 00 to 53, starting with the first
# Monday as the first day of week 01.
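# Note: %U is used below and the result is incremented by one, so this
# function numbers weeks from 01 upward (see the sprintf at the end of the
# sub); the weekly-report caller compensates by passing $wn - 1 back to
# get_wdays_per_month().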
# Check if the date is valid first
my $datefmt = POSIX::strftime("%Y-%m-%d", 1, 1, 1, $day, $month - 1, $year - 1900);
if ($datefmt ne "$year-$month-$day") {
return -1;
}
my $weekNumber = POSIX::strftime("%U", 1, 1, 1, $day, $month - 1, $year - 1900);
return sprintf("%02d", $weekNumber+1);
}
# Returns the day number of the week for a given date
sub get_day_of_week
{
my ($year, $month, $day) = @_;
# %w The day of the week as a decimal, range 0 to 6, Sunday being 0.
my $weekDay = POSIX::strftime("%w", 1,1,1,$day,--$month,$year-1900);
return $weekDay;
}
# Returns all days of the given week number
sub get_wdays_per_month
{
my $wn = shift;
my ($year, $month) = split(/\-/, shift);
my @months = ();
my @retdays = ();
$month ||= '01';
push(@months, "$year$month");
my $start_month = $month;
if ($month eq '01') {
unshift(@months, ($year - 1) . "12");
} else {
unshift(@months, $year . sprintf("%02d", $month - 1));
}
if ($month == 12) {
push(@months, ($year+1) . "01");
} else {
push(@months, $year . sprintf("%02d", $month + 1));
}
foreach my $d (@months) {
$d =~ /^(\d{4})(\d{2})$/;
my $y = $1;
my $m = $2;
foreach my $day ("01" .. "31") {
# Check if the date is valid first
my $datefmt = POSIX::strftime("%Y-%m-%d", 1, 1, 1, $day, $m - 1, $y - 1900);
if ($datefmt ne "$y-$m-$day") {
next;
}
my $weekNumber = POSIX::strftime("%U", 1, 1, 1, $day, $m - 1, $y - 1900);
if ( ($weekNumber == $wn) || ( ($weekNumber eq '00') && (($wn == 1) || ($wn >= 52)) ) ) {
push(@retdays, "$year-$m-$day");
return @retdays if ($#retdays == 6);
}
next if ($weekNumber > $wn);
}
}
return @retdays;
}
sub IsLeapYear
{
return ((($_[0] & 3) == 0) && (($_[0] % 100 != 0) || ($_[0] % 400 == 0)));
}
####
# Display calendar
####
sub get_calendar
{
my ($year, $month) = @_;
my $str = "
\n";
my @wday = qw(Su Mo Tu We Th Fr Sa);
#my @wday = ('Mon','Tue','Wed','Thu','Fri','Sat','Sun');
my @std_day = qw(Su Mo Tu We Th Fr Sa);
my %day_lbl = ();
for (my $i = 0; $i <= $#wday; $i++) {
$day_lbl{$wday[$i]} = $wday[$i];
}
$str .= "
";
map { $str .= '
' . $day_lbl{$_} . '
'; } @wday;
$str .= "
\n\n";
my @currow = ('','','','','','','');
my $d = '';
my $wd = 0;
my $wn = 0;
my $week = '';
for $d ("01" .. "31") {
last if (($d == 31) && grep(/^$month$/, '04','06','09','11'));
last if (($d == 30) && ($month eq '02'));
last if (($d == 29) && ($month eq '02') && !&IsLeapYear($year));
$wd = &get_day_of_week($year,$month,$d);
$wn = &get_week_number($year,$month,$d);
next if ($wn == -1);
if ( !-e "$outdir/$year/$month/$d/index.html" ) {
$currow[$wd] = "
On '+dateToDisplay.toGMTString();
for (var i = 0; i < labels.length; i++) {
if (datasets[i] != undefined) {
textToShow += ' '+pretty_print_number(datasets[i][pos][1], gtype)+' '+labels[i]+'';
}
}
textToShow += '
';
return textToShow;
}
function histoHourTracker(obj, labels, datasets)
{
var dateToDisplay = new Date(parseInt(obj.x));
var posValue = parseInt(obj.x);
// look for the position in data arrays
var pos = 0;
if (datasets != undefined) {
for (pos=0; pos < datasets[0].length; pos++) {
// If timestamp are the same we have found the position
if (datasets[0][pos][0] == posValue) {
// get out of here
break;
}
}
} else {
return 'NO DATASET';
}
var textToShow = '
At '+dateToDisplay.toGMTString().substr(17, 5);
for (var i = 0; i < labels.length; i++) {
if (datasets[i] != undefined) {
textToShow += ' '+pretty_print_number(datasets[i][pos][1])+' '+labels[i]+'';
}
}
textToShow += '
';
return textToShow;
}
function histoDurationTracker(obj, labels, datasets)
{
var posValue = parseInt(obj.x);
// look for the position in data arrays
var pos = 0;
if (datasets != undefined) {
for (pos=0; pos < datasets[0].length; pos++) {
// If timestamp are the same we have found the position
if (datasets[0][pos][0] == posValue) {
// get out of here
break;
}
}
} else {
return 'NO DATASET';
}
var textToShow = '
';
for (var i = 0; i < labels.length; i++) {
if (datasets[i] != undefined) {
textToShow += ''+pretty_print_number(datasets[i][pos][1])+' '+labels[pos+1]+'';
}
}
textToShow += '
';
return textToShow;
}
function pretty_print_number(val, type)
{
if (type == 'size') {
if (val >= 1125899906842624) {
val = (val / 1125899906842624);
val = val.toFixed(2) + " PiB";
} else if (val >= 1099511627776) {
val = (val / 1099511627776);
val = val.toFixed(2) + " TiB";
} else if (val >= 1073741824) {
val = (val / 1073741824);
val = val.toFixed(2) + " GiB";
} else if (val >= 1048576) {
val = (val / 1048576);
val = val.toFixed(2) + " MiB";
} else if (val >= 1024) {
val = (val / 1024);
val = val.toFixed(2) + " KiB";
} else {
val = val + " B";
}
} else if (type == 'duration') {
if (val >= 1000) {
val = (val / 1000);
val = val.toFixed(3) + " sec";
} else {
val = val + " ms";
}
} else {
if (val >= 1000000000000000) {
val = (val / 1000000000000000);
val = val.toFixed(2) + " P";
} else if (val >= 1000000000000) {
val = (val / 1000000000000);
val = val.toFixed(2) + " T";
} else if (val >= 1000000000) {
val = (val / 1000000000);
val = val.toFixed(2) + " G";
} else if (val >= 1000000) {
val = (val / 1000000);
val = val.toFixed(2) + " M";
} else if (val >= 1000) {
val = (val / 1000);
val = val.toFixed(2) + " K";
}
}
return val;
}
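// Illustrative examples (not part of the original source):
//   pretty_print_number(1536, 'size')     -> "1.50 KiB"
//   pretty_print_number(2500, 'duration') -> "2.500 sec"
//   pretty_print_number(1234567)          -> "1.23 M"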
function format_number(val) {
var decimal = 2;
var msep = ',';
var deci = Math.round( Math.pow(10,decimal)*(Math.abs(val)-Math.floor(Math.abs(val)))) ;
val = Math.floor(Math.abs(val));
if ((decimal==0)||(deci==Math.pow(10,decimal))) {deci=0;}
var val_format=val+"";
var nb=val_format.length;
for (var i=1;i<4;i++) {
if (val>=Math.pow(10,(3*i))) {
val_format=val_format.substring(0,nb-(3*i))+msep+val_format.substring(nb-(3*i));
}
}
if (decimal>0) {
var decim="";
for (var j=0;j<(decimal-deci.toString().length);j++) {decim+="0";}
deci=decim+deci.toString();
if (deci > 0) {
val_format=val_format+"."+deci;
}
}
if (parseFloat(val)<0) {val_format="-"+val_format;}
return val_format;
}
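// Illustrative example: format_number(1234567.891) -> "1,234,567.89"
// (two decimals, comma as thousands separator).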
function pieTracker(obj, utype)
{
var textToShow = '';
if (utype == 'duration') {
textToShow += pretty_print_number(obj.y, utype);
} else {
textToShow += format_number(obj.y);
}
textToShow += ' '+obj.series.label+'';
return textToShow;
}
WRFILE: bean.js
/*!
* bean.js - copyright Jacob Thornton 2011
* https://github.com/fat/bean
* MIT License
* special thanks to:
* dean edwards: http://dean.edwards.name/
* dperini: https://github.com/dperini/nwevents
* the entire mootools team: github.com/mootools/mootools-core
*//*global module:true, define:true*/
!function(e,t,n){typeof module!="undefined"?module.exports=n(e,t):typeof define=="function"&&typeof define.amd=="object"?define(n):t[e]=n(e,t)}("bean",this,function(e,t){var n=window,r=t[e],i=/over|out/,s=/[^\.]*(?=\..*)\.|.*/,o=/\..*/,u="addEventListener",a="attachEvent",f="removeEventListener",l="detachEvent",c=document||{},h=c.documentElement||{},p=h[u],d=p?u:a,v=Array.prototype.slice,m=/click|mouse|menu|drag|drop/i,g=/^touch|^gesture/i,y={one:1},b=function(e,t,n){for(n=0;n0){t=t.split(" ");for(f=t.length;f--;)_(e,t[f],n);return e}u=c&&t.replace(o,""),u&&w[u]&&(u=w[u].type);if(!t||c){if(a=c&&t.replace(s,""))a=a.split(".");l(e,u,n,a)}else if(typeof t=="function")l(e,null,t);else for(r in t)t.hasOwnProperty(r)&&_(e,r,t[r]);return e},D=function(e,t,n,r,i){var s,o,u,a,f=n,l=n&&typeof n=="string";if(t&&!n&&typeof t=="object")for(s in t)t.hasOwnProperty(s)&&D.apply(this,[e,s,t[s]]);else{a=arguments.length>3?v.call(arguments,3):[],o=(l?n:t).split(" "),l&&(n=M(t,f=r,i))&&(a=v.call(a,1)),this===y&&(n=L(_,e,t,n,f));for(u=o.length;u--;)O(e,o[u],n,f,a)}return e},P=function(){return D.apply(y,arguments)},H=p?function(e,t,r){var i=c.createEvent(e?"HTMLEvents":"UIEvents");i[e?"initEvent":"initUIEvent"](t,!0,!0,n,1),r.dispatchEvent(i)}:function(e,t,n){n=S(n,e),e?n.fireEvent("on"+t,c.createEventObject()):n["_on"+t]++},B=function(e,t,n){var r,i,u,a,f,l=t.split(" ");for(r=l.length;r--;){t=l[r].replace(o,"");if(a=l[r].replace(s,""))a=a.split(".");if(!a&&!n&&e[d])H(b[t],t,e);else{f=T.get(e,t),n=[!1].concat(n);for(i=0,u=f.length;i=r.computed&&(r={value:e,computed:o})}),r.value},S.min=function(e,t,n){if(!t&&S.isArray(e))return Math.min.apply(Math,e);var r={computed:Infinity};return x(e,function(e,i,s){var o=t?t.call(n,e,i,s):e;or?1:0}),"value")},S.groupBy=function(e,t){var n={};return x(e,function(e,r){var i=t(e,r);(n[i]||(n[i]=[])).push(e)}),n},S.sortedIndex=function(e,t,n){n||(n=S.identity);var r=0,i=e.length;while(r>1;n(e[s])=0})})},S.difference=function(e,t){return S.filter(e,function(e){return!S.include(t,e)})},S.zip=function(){var e=o.call(arguments),t=S.max(S.pluck(e,"length")),n=new Array(t);for(var r=0;r=0;n--)t=[e[n].apply(this,t)];return t[0]}},S.after=function(e,t){return function(){if(--e<1)return t.apply(this,arguments)}},S.keys=w||function(e){if(e!==Object(e))throw new TypeError("Invalid object");var t=[];for(var n in e)f.call(e,n)&&(t[t.length]=n);return t},S.values=function(e){return S.map(e,S.identity)},S.functions=S.methods=function(e){var t=[];for(var n in e)S.isFunction(e[n])&&t.push(n);return t.sort()},S.extend=function(e){return x(o.call(arguments,1),function(t){for(var n in t)t[n]!==void 0&&(e[n]=t[n])}),e},S.defaults=function(e){return x(o.call(arguments,1),function(t){for(var n in t)e[n]==null&&(e[n]=t[n])}),e},S.clone=function(e){return S.isArray(e)?e.slice():S.extend({},e)},S.tap=function(e,t){return t(e),e},S.isEqual=function(e,t){if(e===t)return!0;var n=typeof e,r=typeof t;if(n!=r)return!1;if(e==t)return!0;if(!e&&t||e&&!t)return!1;e._chain&&(e=e._wrapped),t._chain&&(t=t._wrapped);if(e.isEqual)return e.isEqual(t);if(t.isEqual)return t.isEqual(e);if(S.isDate(e)&&S.isDate(t))return e.getTime()===t.getTime();if(S.isNaN(e)&&S.isNaN(t))return!1;if(S.isRegExp(e)&&S.isRegExp(t))return e.source===t.source&&e.global===t.global&&e.ignoreCase===t.ignoreCase&&e.multiline===t.multiline;if(n!=="object")return!1;if(e.length&&e.length!==t.length)return!1;var i=S.keys(e),s=S.keys(t);if(i.length!=s.length)return!1;for(var o in e)if(!(o in 
t)||!S.isEqual(e[o],t[o]))return!1;return!0},S.isEmpty=function(e){if(S.isArray(e)||S.isString(e))return e.length===0;for(var t in e)if(f.call(e,t))return!1;return!0},S.isElement=function(e){return!!e&&e.nodeType==1},S.isArray=b||function(e){return a.call(e)==="[object Array]"},S.isObject=function(e){return e===Object(e)},S.isArguments=function(e){return!!e&&!!f.call(e,"callee")},S.isFunction=function(e){return!!(e&&e.constructor&&e.call&&e.apply)},S.isString=function(e){return!!(e===""||e&&e.charCodeAt&&e.substr)},S.isNumber=function(e){return!!(e===0||e&&e.toExponential&&e.toFixed)},S.isNaN=function(e){return e!==e},S.isBoolean=function(e){return e===!0||e===!1},S.isDate=function(e){return!!(e&&e.getTimezoneOffset&&e.setUTCFullYear)},S.isRegExp=function(e){return!(!(e&&e.test&&e.exec)||!e.ignoreCase&&e.ignoreCase!==!1)},S.isNull=function(e){return e===null},S.isUndefined=function(e){return e===void 0},S.noConflict=function(){return e._=t,this},S.identity=function(e){return e},S.times=function(e,t,n){for(var r=0;r/g,interpolate:/<%=([\s\S]+?)%>/g},S.template=function(e,t){var n=S.templateSettings,r="var __p=[],print=function(){__p.push.apply(__p,arguments);};with(obj||{}){__p.push('"+e.replace(/\\/g,"\\\\").replace(/'/g,"\\'").replace(n.interpolate,function(e,t){return"',"+t.replace(/\\'/g,"'")+",'"}).replace(n.evaluate||null,function(e,t){return"');"+t.replace(/\\'/g,"'").replace(/[\r\n\t]/g," ")+"__p.push('"}).replace(/\r/g,"\\r").replace(/\n/g,"\\n").replace(/\t/g,"\\t")+"');}return __p.join('');",i=new Function("obj",r);return t?i(t):i};var k=function(e){this._wrapped=e};S.prototype=k.prototype;var L=function(e,t){return t?S(e).chain():e},A=function(e,t){k.prototype[e]=function(){var e=o.call(arguments);return u.call(e,this._wrapped),L(t.apply(S,e),this._chain)}};S.mixin(S),x(["pop","push","reverse","shift","sort","splice","unshift"],function(e){var t=r[e];k.prototype[e]=function(){return t.apply(this._wrapped,arguments),L(this._wrapped,this._chain)}}),x(["concat","join","slice"],function(e){var t=r[e];k.prototype[e]=function(){return L(t.apply(this._wrapped,arguments),this._chain)}}),k.prototype.chain=function(){return this._chain=!0,this},k.prototype.value=function(){return this._wrapped}})();
WRFILE: flotr2.js
/**
* Flotr2 (c) 2012 Carl Sutherland
* MIT License
* Special thanks to:
* Flotr: http://code.google.com/p/flotr/ (fork)
* Flot: https://github.com/flot/flot (original fork)
*/
(function(){var e=this,t=this.Flotr,n;n={_:_,bean:bean,isIphone:/iphone/i.test(navigator.userAgent),isIE:navigator.appVersion.indexOf("MSIE")!=-1?parseFloat(navigator.appVersion.split("MSIE")[1]):!1,graphTypes:{},plugins:{},addType:function(e,t){n.graphTypes[e]=t,n.defaultOptions[e]=t.options||{},n.defaultOptions.defaultType=n.defaultOptions.defaultType||e},addPlugin:function(e,t){n.plugins[e]=t,n.defaultOptions[e]=t.options||{}},draw:function(e,t,r,i){return i=i||n.Graph,new i(e,t,r)},merge:function(e,t){var r,i,s=t||{};for(r in e)i=e[r],i&&typeof i=="object"?i.constructor===Array?s[r]=this._.clone(i):i.constructor!==RegExp&&!this._.isElement(i)&&!i.jquery?s[r]=n.merge(i,t?t[r]:undefined):s[r]=i:s[r]=i;return s},clone:function(e){return n.merge(e,{})},getTickSize:function(e,t,r,i){var s=(r-t)/e,o=n.getMagnitude(s),u=10,a=s/o;return a<1.5?u=1:a<2.25?u=2:a<3?u=i===0?2:2.5:a<7.5&&(u=5),u*o},defaultTickFormatter:function(e,t){return e+""},defaultTrackFormatter:function(e){return"("+e.x+", "+e.y+")"},engineeringNotation:function(e,t,n){var r=["Y","Z","E","P","T","G","M","k",""],i=["y","z","a","f","p","n","µ","m",""],s=r.length;n=n||1e3,t=Math.pow(10,t||2);if(e===0)return 0;if(e>1)while(s--&&e>=n)e/=n;else{r=i,s=r.length;while(s--&&e<1)e*=n}return Math.round(e*t)/t+r[s]},getMagnitude:function(e){return Math.pow(10,Math.floor(Math.log(e)/Math.LN10))},toPixel:function(e){return Math.floor(e)+.5},toRad:function(e){return-e*(Math.PI/180)},floorInBase:function(e,t){return t*Math.floor(e/t)},drawText:function(e,t,r,i,s){if(!e.fillText){e.drawText(t,r,i,s);return}s=this._.extend({size:n.defaultOptions.fontSize,color:"#000000",textAlign:"left",textBaseline:"bottom",weight:1,angle:0},s),e.save(),e.translate(r,i),e.rotate(s.angle),e.fillStyle=s.color,e.font=(s.weight>1?"bold ":"")+s.size*1.3+"px sans-serif",e.textAlign=s.textAlign,e.textBaseline=s.textBaseline,e.fillText(t,0,0),e.restore()},getBestTextAlign:function(e,t){return t=t||{textAlign:"center",textBaseline:"middle"},e+=n.getTextAngleFromAlign(t),Math.abs(Math.cos(e))>.01&&(t.textAlign=Math.cos(e)>0?"right":"left"),Math.abs(Math.sin(e))>.01&&(t.textBaseline=Math.sin(e)>0?"top":"bottom"),t},alignTable:{"right middle":0,"right top":Math.PI/4,"center top":Math.PI/2,"left top":3*(Math.PI/4),"left middle":Math.PI,"left bottom":-3*(Math.PI/4),"center bottom":-Math.PI/2,"right bottom":-Math.PI/4,"center middle":0},getTextAngleFromAlign:function(e){return n.alignTable[e.textAlign+" "+e.textBaseline]||0},noConflict:function(){return 
e.Flotr=t,this}},e.Flotr=n})(),Flotr.defaultOptions={colors:["#00A8F0","#C0D800","#CB4B4B","#4DA74D","#9440ED"],ieBackgroundColor:"#FFFFFF",title:null,subtitle:null,shadowSize:4,defaultType:null,HtmlText:!0,fontColor:"#545454",fontSize:7.5,resolution:1,parseFloat:!0,preventDefault:!0,xaxis:{ticks:null,minorTicks:null,showLabels:!0,showMinorLabels:!1,labelsAngle:0,title:null,titleAngle:0,noTicks:5,minorTickFreq:null,tickFormatter:Flotr.defaultTickFormatter,tickDecimals:null,min:null,max:null,autoscale:!1,autoscaleMargin:0,color:null,mode:"normal",timeFormat:null,timeMode:"UTC",timeUnit:"millisecond",scaling:"linear",base:Math.E,titleAlign:"center",margin:!0},x2axis:{},yaxis:{ticks:null,minorTicks:null,showLabels:!0,showMinorLabels:!1,labelsAngle:0,title:null,titleAngle:90,noTicks:5,minorTickFreq:null,tickFormatter:Flotr.defaultTickFormatter,tickDecimals:null,min:null,max:null,autoscale:!1,autoscaleMargin:0,color:null,scaling:"linear",base:Math.E,titleAlign:"center",margin:!0},y2axis:{titleAngle:270},grid:{color:"#545454",backgroundColor:null,backgroundImage:null,watermarkAlpha:.4,tickColor:"#DDDDDD",labelMargin:3,verticalLines:!0,minorVerticalLines:null,horizontalLines:!0,minorHorizontalLines:null,outlineWidth:1,outline:"nsew",circular:!1},mouse:{track:!1,trackAll:!1,position:"se",relative:!1,trackFormatter:Flotr.defaultTrackFormatter,margin:5,lineColor:"#FF3F19",trackDecimals:1,sensibility:2,trackY:!0,radius:3,fillColor:null,fillOpacity:.4}},function(){function t(e,t,n,r){this.rgba=["r","g","b","a"];var i=4;while(-1<--i)this[this.rgba[i]]=arguments[i]||(i==3?1:0);this.normalize()}var e=Flotr._,n={aqua:[0,255,255],azure:[240,255,255],beige:[245,245,220],black:[0,0,0],blue:[0,0,255],brown:[165,42,42],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgrey:[169,169,169],darkgreen:[0,100,0],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkviolet:[148,0,211],fuchsia:[255,0,255],gold:[255,215,0],green:[0,128,0],indigo:[75,0,130],khaki:[240,230,140],lightblue:[173,216,230],lightcyan:[224,255,255],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightyellow:[255,255,224],lime:[0,255,0],magenta:[255,0,255],maroon:[128,0,0],navy:[0,0,128],olive:[128,128,0],orange:[255,165,0],pink:[255,192,203],purple:[128,0,128],violet:[128,0,128],red:[255,0,0],silver:[192,192,192],white:[255,255,255],yellow:[255,255,0]};t.prototype={scale:function(t,n,r,i){var s=4;while(-1<--s)e.isUndefined(arguments[s])||(this[this.rgba[s]]*=arguments[s]);return this.normalize()},alpha:function(t){return!e.isUndefined(t)&&!e.isNull(t)&&(this.a=t),this.normalize()},clone:function(){return new t(this.r,this.b,this.g,this.a)},limit:function(e,t,n){return Math.max(Math.min(e,n),t)},normalize:function(){var e=this.limit;return this.r=e(parseInt(this.r,10),0,255),this.g=e(parseInt(this.g,10),0,255),this.b=e(parseInt(this.b,10),0,255),this.a=e(this.a,0,1),this},distance:function(e){if(!e)return;e=new t.parse(e);var n=0,r=3;while(-1<--r)n+=Math.abs(this[this.rgba[r]]-e[this.rgba[r]]);return n},toString:function(){return this.a>=1?"rgb("+[this.r,this.g,this.b].join(",")+")":"rgba("+[this.r,this.g,this.b,this.a].join(",")+")"},contrast:function(){var e=1-(.299*this.r+.587*this.g+.114*this.b)/255;return e<.5?"#000000":"#ffffff"}},e.extend(t,{parse:function(e){if(e instanceof t)return e;var r;if(r=/#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})/.exec(e))return new 
t(parseInt(r[1],16),parseInt(r[2],16),parseInt(r[3],16));if(r=/rgb\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*\)/.exec(e))return new t(parseInt(r[1],10),parseInt(r[2],10),parseInt(r[3],10));if(r=/#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])/.exec(e))return new t(parseInt(r[1]+r[1],16),parseInt(r[2]+r[2],16),parseInt(r[3]+r[3],16));if(r=/rgba\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(e))return new t(parseInt(r[1],10),parseInt(r[2],10),parseInt(r[3],10),parseFloat(r[4]));if(r=/rgb\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*\)/.exec(e))return new t(parseFloat(r[1])*2.55,parseFloat(r[2])*2.55,parseFloat(r[3])*2.55);if(r=/rgba\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(e))return new t(parseFloat(r[1])*2.55,parseFloat(r[2])*2.55,parseFloat(r[3])*2.55,parseFloat(r[4]));var i=(e+"").replace(/^\s*([\S\s]*?)\s*$/,"$1").toLowerCase();return i=="transparent"?new t(255,255,255,0):(r=n[i])?new t(r[0],r[1],r[2]):new t(0,0,0,0)},processColor:function(n,r){var i=r.opacity;if(!n)return"rgba(0, 0, 0, 0)";if(n instanceof t)return n.alpha(i).toString();if(e.isString(n))return t.parse(n).alpha(i).toString();var s=n.colors?n:{colors:n};if(!r.ctx)return e.isArray(s.colors)?t.parse(e.isArray(s.colors[0])?s.colors[0][1]:s.colors[0]).alpha(i).toString():"rgba(0, 0, 0, 0)";s=e.extend({start:"top",end:"bottom"},s),/top/i.test(s.start)&&(r.x1=0),/left/i.test(s.start)&&(r.y1=0),/bottom/i.test(s.end)&&(r.x2=0),/right/i.test(s.end)&&(r.y2=0);var o,u,a,f=r.ctx.createLinearGradient(r.x1,r.y1,r.x2,r.y2);for(o=0;o=h)break}h=i[v][0],p=i[v][1],p=="year"&&(h=Flotr.getTickSize(s.noTicks*r.year,a,f,0),h==.5&&(p="month",h=6)),e.tickUnit=p,e.tickSize=h;var g=h*r[p];m=new Date(a);switch(p){case"millisecond":y("Milliseconds");break;case"second":y("Seconds");break;case"minute":y("Minutes");break;case"hour":y("Hours");break;case"month":y("Month");break;case"year":y("FullYear")}g>=r.second&&t(m,"Milliseconds",o,0),g>=r.minute&&t(m,"Seconds",o,0),g>=r.hour&&t(m,"Minutes",o,0),g>=r.day&&t(m,"Hours",o,0),g>=r.day*4&&t(m,"Date",o,1),g>=r.year&&t(m,"Month",o,0);var b=0,w=NaN,E;do{E=w,w=m.getTime(),c.push({v:w/u,label:d(w/u,e)});if(p=="month")if(h<1){t(m,"Date",o,1);var S=m.getTime();t(m,"Month",o,n(m,"Month",o)+1);var x=m.getTime();m.setTime(w+b*r.hour+(x-S)*h),b=n(m,"Hours",o),t(m,"Hours",o,0)}else t(m,"Month",o,n(m,"Month",o)+h);else p=="year"?t(m,"FullYear",o,n(m,"FullYear",o)+h):m.setTime(w+g)}while(w0)return{x:t.touches[0].pageX,y:t.touches[0].pageY};if(!e._.isUndefined(t.changedTouches)&&t.changedTouches.length>0)return{x:t.changedTouches[0].pageX,y:t.changedTouches[0].pageY};if(t.pageX||t.pageY)return{x:t.pageX,y:t.pageY};if(t.clientX||t.clientY){var n=document,r=n.body,i=n.documentElement;return{x:t.clientX+r.scrollLeft+i.scrollLeft,y:t.clientY+r.scrollTop+i.scrollTop}}}}}(),function(){var e=Flotr,t=e.DOM,n=e._,r=function(e){this.o=e};r.prototype={dimensions:function(e,t,n,r){return e?this.o.html?this.html(e,this.o.element,n,r):this.canvas(e,t):{width:0,height:0}},canvas:function(t,n){if(!this.o.textEnabled)return;n=n||{};var r=this.measureText(t,n),i=r.width,s=n.size||e.defaultOptions.fontSize,o=n.angle||0,u=Math.cos(o),a=Math.sin(o),f=2,l=6,c;return c={width:Math.abs(u*i)+Math.abs(a*s)+f,height:Math.abs(a*i)+Math.abs(u*s)+l},c},html:function(e,n,r,i){var s=t.create("div");return t.setStyles(s,{position:"absolute",top:"-10000px"}),t.insert(s,'