$data_info{$id}{menu} on $db database indexes
$data_info{$id}{description}
$table_header
};
my $found_table_stat = 0;
foreach my $idx (sort keys %{$all_statio_user_indexes{$db}}) {
next if ($idx eq 'all');
next if (($#INCLUDE_TB >= 0) && !grep(/^$idx$/, @INCLUDE_TB));
my $table_data = '';
if (!$all_statio_user_indexes{$db}{$idx}{idx_blks_read} && !$all_statio_user_indexes{$db}{$idx}{idx_blks_hit}) {
next;
}
$table_data = qq{$idx | $all_statio_user_indexes{$db}{$idx}{idx_blks_read} | $all_statio_user_indexes{$db}{$idx}{idx_blks_hit} |
};
if ($table_data) {
print $table_data;
$found_table_stat = 1;
}
}
if (!$found_table_stat) {
$table_header = qq{ | };
}
print qq{
};
}
%all_stat_user_indexes = ();
}
# Compute statistics of xlog cluster
# Parse the xlog statistics data file starting at the given offset and
# accumulate per-timestamp WAL file counters into the global
# %all_xlog_stat hash. Returns the new offset to resume from next time.
sub pg_xlog_stat
{
	my ($input_dir, $file, $offset) = @_;

	# Nothing to collect for these report types
	return if (($ACTION eq 'home') || ($ACTION eq 'database-info'));

	# Load data from file, resuming where the previous call stopped
	my $fh = open_filehdl("$input_dir/$file");
	$fh->seek($offset, 0);
	while (my $line = <$fh>) {
		$offset += length($line);
		my @col = split(/;/, $line);
		next if (!&normalize_line(\@col));
		# timestamp | total_file | last_wal_name | wal_recycled | wal_written | max_wal
		# case of pgstats file content
		if ($#col == 3) {
			# pgstats format: count one WAL file per input line
			$all_xlog_stat{$col[0]}{total}++;
		} else {
			$all_xlog_stat{$col[0]}{total} = ($col[1] || 0);
			if ($#col == 5) {
				$all_xlog_stat{$col[0]}{recycled} = ($col[3] || 0);
				$all_xlog_stat{$col[0]}{written} = ($col[4] || 0);
				$all_xlog_stat{$col[0]}{max_wal} = (POSIX::ceil($col[5]) || 0);
			}
		}
	}
	$fh->close();

	return $offset;
}
# Compute graphs of xlog cluster statistics
# Build and print the WAL files graph from the data collected by
# pg_xlog_stat(). Consumes and clears the global %all_xlog_stat hash.
sub pg_xlog_stat_report
{
	my ($src_base, $db, %data_info) = @_;

	return if (($ACTION eq 'home') || ($ACTION eq 'database-info'));
	return if (!scalar keys %all_xlog_stat);

	my %xlog_stat = ();
	# Timezone adjustment in milliseconds for the graph time axis
	my $tz = ($STATS_TIMEZONE*3600*1000);
	foreach my $t (sort {$a <=> $b} keys %all_xlog_stat) {
		# Append one [time,value] point per series, defaulting to zero
		foreach my $metric ('total', 'recycled', 'written', 'max_wal') {
			$xlog_stat{$metric} .= '[' . ($t - $tz) . ',' . ($all_xlog_stat{$t}{$metric} || 0) . '],';
		}
	}
	%all_xlog_stat = ();

	if ($xlog_stat{total} ) {
		my $id = &get_data_id('cluster-xlog_files', %data_info);
		# Drop the trailing comma left by the string building above
		$xlog_stat{total} =~ s/,$//;
		if (exists $xlog_stat{recycled}) {
			push(@{$data_info{$id}{legends}}, 'recycled', 'written', 'max_wal');
			print &jqplot_linegraph_array($IDX++, 'cluster-xlog_files', \%{$data_info{$id}}, '', $xlog_stat{total}, $xlog_stat{recycled}, $xlog_stat{written}, $xlog_stat{max_wal});
		} else {
			print &jqplot_linegraph_array($IDX++, 'cluster-xlog_files', \%{$data_info{$id}}, '', $xlog_stat{total});
		}
	}
}
# Compute statistics of bgwriter cluster
# Parse the pg_stat_bgwriter data file starting at the given offset and
# store per-timestamp deltas into the global %all_stat_bgwriter hash.
# Buffer counters are converted to bytes (8192 bytes per buffer).
# Returns the new file offset to resume from on the next call.
sub pg_stat_bgwriter
{
	my ($input_dir, $file, $offset) = @_;

	return if ( $ACTION eq 'database-info' );

	my @start_vals = ();

	# Difference between current and previous sample, clamped to zero to
	# absorb counter resets (stats reset or server restart).
	my $delta = sub {
		my ($cur, $prev) = @_;
		my $diff = $cur - $prev;
		return ($diff < 0) ? 0 : $diff;
	};

	# Load data from file
	my $curfh = open_filehdl("$input_dir/$file");
	$curfh->seek($offset,0);
	while (my $l = <$curfh>) {
		$offset += length($l);
		my @data = split(/;/, $l);
		next if (!&normalize_line(\@data));
		if ($#data >= 9) {
			# Keep the most recent stats_reset timestamp seen so far
			$OVERALL_STATS{'bgwriter'}{stats_reset} = $data[-1] if (!exists $OVERALL_STATS{'bgwriter'}{stats_reset} || ($OVERALL_STATS{'bgwriter'}{stats_reset} lt $data[-1]));
		}
		next if ($ACTION eq 'home');
		# First data line is its own baseline (all deltas come out zero)
		push(@start_vals, @data) if ($#start_vals < 0);
		# Store interval between previous run (milliseconds -> seconds)
		$all_stat_bgwriter{$data[0]}{interval} = ($data[0] - $start_vals[0])/1000;
		$all_stat_bgwriter{$data[0]}{checkpoints_timed} = $delta->($data[1], $start_vals[1]);
		$all_stat_bgwriter{$data[0]}{checkpoints_req} = $delta->($data[2], $start_vals[2]);
		# Newer data files add checkpoint write/sync times, shifting the
		# remaining columns by two positions.
		my $id = 0;
		if ($#data > 10) {
			$id += 2;
			$all_stat_bgwriter{$data[0]}{checkpoint_write_time} = $delta->($data[3], $start_vals[3]);
			$all_stat_bgwriter{$data[0]}{checkpoint_sync_time} = $delta->($data[4], $start_vals[4]);
		}
		$all_stat_bgwriter{$data[0]}{buffers_checkpoint} = $delta->($data[3+$id], $start_vals[3+$id])*8192;
		$all_stat_bgwriter{$data[0]}{buffers_clean} = $delta->($data[4+$id], $start_vals[4+$id])*8192;
		$all_stat_bgwriter{$data[0]}{maxwritten_clean} = $delta->($data[5+$id], $start_vals[5+$id]);
		$all_stat_bgwriter{$data[0]}{buffers_backend} = $delta->($data[6+$id], $start_vals[6+$id])*8192;
		if ($#data >= 9) {
			# FIX: was '.=' (string concatenation) unlike every other
			# counter here, which corrupted the value whenever the same
			# timestamp was processed more than once.
			$all_stat_bgwriter{$data[0]}{buffers_backend_fsync} = $delta->($data[7+$id], $start_vals[7+$id]);
			$all_stat_bgwriter{$data[0]}{buffers_alloc} = $delta->($data[8+$id], $start_vals[8+$id])*8192;
		}
		@start_vals = ();
		push(@start_vals, @data);
	}
	$curfh->close();

	return $offset;
}
# Compute graphs of bgwriter cluster statistics
# Build and print the bgwriter/checkpoint graphs from the data collected
# by pg_stat_bgwriter(). Only the graph matching $REAL_ACTION is drawn.
# Consumes and clears the global %all_stat_bgwriter hash.
sub pg_stat_bgwriter_report
{
	my ($src_base, $db, %data_info) = @_;

	return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
	return if (!scalar keys %all_stat_bgwriter);

	my %bgwriter_stat = ();
	my %data = ();
	# Timezone adjustment in milliseconds for the graph time axis
	my $tz = ($STATS_TIMEZONE*3600*1000);
	foreach my $t (sort {$a <=> $b} keys %all_stat_bgwriter) {
		# Samples with a null interval cannot be turned into a rate
		next if (!$all_stat_bgwriter{$t}{interval});
		$data{'Checkpoint timed'} += $all_stat_bgwriter{$t}{checkpoints_timed};
		$data{'Checkpoint requested'} += $all_stat_bgwriter{$t}{checkpoints_req};
		$bgwriter_stat{checkpoints_timed} .= '[' . ($t - $tz) . ',' . $all_stat_bgwriter{$t}{checkpoints_timed} . '],';
		$bgwriter_stat{checkpoints_req} .= '[' . ($t - $tz) . ',' . $all_stat_bgwriter{$t}{checkpoints_req} . '],';
		$bgwriter_stat{checkpoint_write_time} .= '[' . ($t - $tz) . ',' . ($all_stat_bgwriter{$t}{checkpoint_write_time}||0) . '],';
		$bgwriter_stat{checkpoint_sync_time} .= '[' . ($t - $tz) . ',' . ($all_stat_bgwriter{$t}{checkpoint_sync_time}||0) . '],';
		# Buffer counters are stored in bytes; divide by the interval
		# (seconds) to report a per-second rate
		$bgwriter_stat{buffers_checkpoint} .= '[' . ($t - $tz) . ',' . int(($all_stat_bgwriter{$t}{buffers_checkpoint}||0)/$all_stat_bgwriter{$t}{interval}) . '],';
		$bgwriter_stat{buffers_clean} .= '[' . ($t - $tz) . ',' . int(($all_stat_bgwriter{$t}{buffers_clean}||0)/$all_stat_bgwriter{$t}{interval}) . '],';
		$bgwriter_stat{buffers_backend} .= '[' . ($t - $tz) . ',' . int(($all_stat_bgwriter{$t}{buffers_backend}||0)/$all_stat_bgwriter{$t}{interval}) . '],';
		$bgwriter_stat{buffers_alloc} .= '[' . ($t - $tz) . ',' . int(($all_stat_bgwriter{$t}{buffers_alloc}||0)/$all_stat_bgwriter{$t}{interval}) . '],';
		$bgwriter_stat{maxwritten_clean} .= '[' . ($t - $tz) . ',' . ($all_stat_bgwriter{$t}{maxwritten_clean}||0) . '],';
		$bgwriter_stat{buffers_backend_fsync} .= '[' . ($t - $tz) . ',' . ((exists $all_stat_bgwriter{$t}{buffers_backend_fsync}) ? $all_stat_bgwriter{$t}{buffers_backend_fsync} : '0') . '],';
	}
	%all_stat_bgwriter = ();

	# FIX: default both counters to zero so an empty/filtered-out data
	# set does not raise "uninitialized value" warnings here.
	my $total_checkpoint = ($data{'Checkpoint timed'} || 0) + ($data{'Checkpoint requested'} || 0);
	if (($data_info{$ID_ACTION}{name} eq 'cluster-checkpoints') && $total_checkpoint) {
		# Append the percentage of each checkpoint kind to the legends
		$data_info{$ID_ACTION}{legends}[0] .= ' (' . sprintf("%0.2f", $data{'Checkpoint timed'}*100/($total_checkpoint||1)) . '%)';
		$data_info{$ID_ACTION}{legends}[1] .= ' (' . sprintf("%0.2f", $data{'Checkpoint requested'}*100/($total_checkpoint||1)) . '%)';
	}
	foreach my $id (sort {$a <=> $b} keys %data_info) {
		next if ($id ne $ID_ACTION);
		next if ($data_info{$id}{name} ne $REAL_ACTION);
		if ($data_info{$id}{name} eq 'cluster-checkpoints') {
			$bgwriter_stat{checkpoints_timed} =~ s/,$//;
			$bgwriter_stat{checkpoints_req} =~ s/,$//;
			print &jqplot_linegraph_array($IDX++, 'cluster-checkpoints', \%{$data_info{$id}}, '', $bgwriter_stat{checkpoints_timed}, $bgwriter_stat{checkpoints_req});
		} elsif ($data_info{$id}{name} eq 'cluster-checkpoints_time') {
			# Write/sync times only exist with the newer data file format
			if (exists $bgwriter_stat{checkpoint_write_time}) {
				$bgwriter_stat{checkpoint_sync_time} =~ s/,$//;
				$bgwriter_stat{checkpoint_write_time} =~ s/,$//;
				print &jqplot_linegraph_array($IDX++, 'cluster-checkpoints_time', \%{$data_info{$id}}, '', $bgwriter_stat{checkpoint_write_time}, $bgwriter_stat{checkpoint_sync_time});
			}
		} elsif ($data_info{$id}{name} eq 'cluster-bgwriter_write') {
			$bgwriter_stat{buffers_checkpoint} =~ s/,$//;
			$bgwriter_stat{buffers_clean} =~ s/,$//;
			$bgwriter_stat{buffers_backend} =~ s/,$//;
			print &jqplot_linegraph_array($IDX++, 'cluster-bgwriter_write', \%{$data_info{$id}}, '', $bgwriter_stat{buffers_checkpoint}, $bgwriter_stat{buffers_clean}, $bgwriter_stat{buffers_backend});
		} elsif ($data_info{$id}{name} eq 'cluster-bgwriter_read') {
			$bgwriter_stat{buffers_alloc} =~ s/,$//;
			print &jqplot_linegraph_array($IDX++, 'cluster-bgwriter_read', \%{$data_info{$id}}, '', $bgwriter_stat{buffers_alloc});
		} elsif ($data_info{$id}{name} eq 'cluster-bgwriter_count') {
			$bgwriter_stat{maxwritten_clean} =~ s/,$//;
			$bgwriter_stat{buffers_backend_fsync} =~ s/,$//;
			print &jqplot_linegraph_array($IDX++, 'cluster-bgwriter_count', \%{$data_info{$id}}, '', $bgwriter_stat{maxwritten_clean}, $bgwriter_stat{buffers_backend_fsync});
		}
	}
}
# Compute statistics of connections
# Parse the connections statistics data file starting at the given offset
# and store per-timestamp, per-database counters into the global
# %all_stat_connections hash. Also maintains @global_databases.
# Returns the new file offset to resume from on the next call.
# (Removed unused locals @start_vals and $tmp_val.)
sub pg_stat_connections
{
	my ($input_dir, $file, $offset) = @_;

	return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );

	my %db_list = ();
	# Load data from file
	my $curfh = open_filehdl("$input_dir/$file");
	$curfh->seek($offset,0);
	while (my $l = <$curfh>) {
		$offset += length($l);
		my @data = split(/;/, $l);
		next if (!&normalize_line(\@data, 5));
		# timestamp | total | active | waiting | idle_in_xact | datname
		# Store list of database
		$db_list{$data[5]} = 1;
		if ($ACTION eq 'database-connections') {
			# Per-database detail
			$all_stat_connections{$data[0]}{$data[5]}{total} = $data[1];
			$all_stat_connections{$data[0]}{$data[5]}{active} = $data[2];
			$all_stat_connections{$data[0]}{$data[5]}{waiting} = $data[3];
			$all_stat_connections{$data[0]}{$data[5]}{idle_in_xact} = $data[4];
			# idle = total minus active and idle-in-transaction backends
			$all_stat_connections{$data[0]}{$data[5]}{idle} = ($data[1] - $data[2] - $data[4]);
		} else {
			# Cluster-wide aggregation under the 'all' pseudo database
			$all_stat_connections{$data[0]}{'all'}{total} += $data[1];
			$all_stat_connections{$data[0]}{'all'}{active} += $data[2];
			$all_stat_connections{$data[0]}{'all'}{waiting} += $data[3];
			$all_stat_connections{$data[0]}{'all'}{idle_in_xact} += $data[4];
			$all_stat_connections{$data[0]}{'all'}{idle} += ($data[1] - $data[2] - $data[4]);
		}
	}
	$curfh->close();

	# Store the full list of database
	foreach my $d (keys %db_list) {
		push(@global_databases, $d) if (!grep/^$d$/, @global_databases);
	}
	push(@global_databases, 'all') if (($#global_databases >= 0) && !grep(/^all$/, @global_databases));

	return $offset;
}
# Compute graphs of connections statistics
sub pg_stat_connections_report
{
# Build and print the connection graphs from the data collected by
# pg_stat_connections(). $db_glob restricts the report to one database
# ('all' means every database). Consumes and clears the global
# %all_stat_connections hash.
my ($src_base, $db_glob, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my %connections_stat = ();
# Timezone adjustment in milliseconds for the graph time axis
my $tz = ($STATS_TIMEZONE*3600*1000);
foreach my $time (sort {$a <=> $b} keys %all_stat_connections)
{
if ($ACTION eq 'database-connections') {
# Per-database series, filtered by the requested database and the
# optional include list
foreach my $db (@global_databases) {
next if (($db_glob ne 'all') && ($db ne $db_glob));
next if (($db ne 'all') && ($#INCLUDE_DB >= 0) && (!grep($db =~ /^$_$/, @INCLUDE_DB)));
$connections_stat{$db}{total} .= '[' . ($time - $tz) . ',' . ($all_stat_connections{$time}{$db}{total}||0) . '],';
$connections_stat{$db}{active} .= '[' . ($time - $tz) . ',' . ($all_stat_connections{$time}{$db}{active}||0) . '],';
$connections_stat{$db}{waiting} .= '[' . ($time - $tz) . ',' . ($all_stat_connections{$time}{$db}{waiting}||0) . '],';
$connections_stat{$db}{idle_in_xact} .= '[' . ($time - $tz) . ',' . ($all_stat_connections{$time}{$db}{idle_in_xact}||0) . '],';
$connections_stat{$db}{idle} .= '[' . ($time - $tz) . ',' . ($all_stat_connections{$time}{$db}{idle}||0) . '],';
}
} else {
# Cluster-wide series aggregated under the 'all' pseudo database
$connections_stat{'all'}{total} .= '[' . ($time - $tz) . ',' . ($all_stat_connections{$time}{'all'}{total}||0) . '],';
$connections_stat{'all'}{active} .= '[' . ($time - $tz) . ',' . ($all_stat_connections{$time}{'all'}{active}||0) . '],';
$connections_stat{'all'}{waiting} .= '[' . ($time - $tz) . ',' . ($all_stat_connections{$time}{'all'}{waiting}||0) . '],';
$connections_stat{'all'}{idle_in_xact} .= '[' . ($time - $tz) . ',' . ($all_stat_connections{$time}{'all'}{idle_in_xact}||0) . '],';
$connections_stat{'all'}{idle} .= '[' . ($time - $tz) . ',' . ($all_stat_connections{$time}{'all'}{idle}||0) . '],';
}
}
%all_stat_connections = ();
my $id = &get_data_id($ACTION, %data_info);
if (scalar keys %connections_stat > 0) {
foreach my $db (sort keys %connections_stat) {
next if ($db ne $db_glob);
next if (($db ne 'all') && ($#INCLUDE_DB >= 0) && (!grep($db =~ /^$_$/, @INCLUDE_DB)));
# Drop the trailing comma left by the string building above
$connections_stat{$db}{total} =~ s/,$//;
$connections_stat{$db}{active} =~ s/,$//;
$connections_stat{$db}{waiting} =~ s/,$//;
$connections_stat{$db}{idle_in_xact} =~ s/,$//;
$connections_stat{$db}{idle} =~ s/,$//;
if ($db ne 'all') {
print &jqplot_linegraph_array($IDX++, 'database-connections', \%{$data_info{$id}}, $db, $connections_stat{$db}{active}, $connections_stat{$db}{idle}, $connections_stat{$db}{idle_in_xact}, $connections_stat{$db}{waiting});
} else {
print &jqplot_linegraph_array($IDX++, 'cluster-connections', \%{$data_info{$id}}, 'all', $connections_stat{$db}{active}, $connections_stat{$db}{idle}, $connections_stat{$db}{idle_in_xact}, $connections_stat{$db}{waiting});
}
}
} else {
# No data at all: print an empty graph placeholder
print &empty_dataset('database-connections', \%{$data_info{$id}}, 'indexes');
}
}
# Compute statistics of user functions call
sub pg_stat_user_functions
{
# Parse the user functions data file and accumulate per-database,
# per-function call/time deltas into %all_stat_user_functions.
my ($input_dir, $file) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
%all_stat_user_functions = ();
my @start_vals = ();
my $tmp_val = 0;
# Load data from file
my $curfh = open_filehdl("$input_dir/$file");
while (my $l = <$curfh>) {
chomp($l);
next if (!$l);
my @data = split(/;/, $l);
next if (!&normalize_line(\@data));
# timestamp | datname | funcid | schemaname | funcname | calls | total_time | self_time
# Skip databases not in the include list, when one is given
next if (($#INCLUDE_DB >= 0) && (!grep($data[1] =~ /^$_$/, @INCLUDE_DB)));
# First data line is its own baseline (all deltas come out zero)
push(@start_vals, @data) if ($#start_vals < 0);
# Qualify the function name with its schema
$data[4] = "$data[3].$data[4]";
# NOTE(review): the deltas below compare each line with the immediately
# preceding line, which may describe a different function — confirm this
# matches the sampling layout of the input file.
(($data[5] - $start_vals[5]) < 0) ? $tmp_val = 0 : $tmp_val = ($data[5] - $start_vals[5]);
$all_stat_user_functions{$data[1]}{$data[4]}{calls} += $tmp_val;
(($data[6] - $start_vals[6]) < 0) ? $tmp_val = 0 : $tmp_val = ($data[6] - $start_vals[6]);
$all_stat_user_functions{$data[1]}{$data[4]}{total_time} += $tmp_val;
(($data[7] - $start_vals[7]) < 0) ? $tmp_val = 0 : $tmp_val = ($data[7] - $start_vals[7]);
$all_stat_user_functions{$data[1]}{$data[4]}{self_time} += $tmp_val;
@start_vals = ();
push(@start_vals, @data);
}
$curfh->close();
}
# Compute graphs of user functions call statistics
sub pg_stat_user_functions_report
{
# Print the per-function statistics table for database $db from the data
# collected by pg_stat_user_functions().
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
return if (!$db);
my $id = &get_data_id('database-functions', %data_info);
my $table_header = qq{
Schema.function |
Calls |
Total time |
Self time |
};
# No function data collected for this database: show an empty header
if (!exists $all_stat_user_functions{$db} || scalar keys %{$all_stat_user_functions{$db}} == 0) {
$table_header = qq{
| };
}
# NOTE(review): $colspan is assigned but never referenced below —
# presumably consumed by template markup not visible here; confirm.
my $colspan = ' colspan="4"';
print qq{
-
$data_info{$id}{menu} on $db database
$data_info{$id}{description}
$table_header
};
# NOTE(review): $found_fct_stat is set but never read in this sub.
my $found_fct_stat = 0;
foreach my $fct (sort keys %{$all_stat_user_functions{$db}}) {
# Skip functions with no recorded activity at all
if (!$all_stat_user_functions{$db}{$fct}{calls} && !$all_stat_user_functions{$db}{$fct}{total_time} && !$all_stat_user_functions{$db}{$fct}{self_time}) {
next;
}
# Default missing counters to zero before printing
foreach ('calls','total_time','self_time') {
$all_stat_user_functions{$db}{$fct}{$_} ||= 0;
}
print "$fct | $all_stat_user_functions{$db}{$fct}{calls} | " . &format_duration($all_stat_user_functions{$db}{$fct}{total_time}) . " | " . &format_duration($all_stat_user_functions{$db}{$fct}{self_time}) . " |
\n";
}
print qq{
};
}
# Compute graphs of replication cluster statistics
sub pg_stat_replication
{
# Parse the pg_stat_replication data file starting at the given offset
# and store per-timestamp, per-standby lag values into the global
# %all_stat_replication hash. Returns the new offset to resume from.
my ($input_dir, $file, $offset) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my @start_vals = ();
my $tmp_val = 0;
# Load data from file
my $curfh = open_filehdl("$input_dir/$file");
$curfh->seek($offset,0);
while (my $l = <$curfh>) {
$offset += length($l);
my @data = split(/;/, $l);
next if (!&normalize_line(\@data));
# timestamp | pid | usesysid | usename | application_name | client_addr | client_hostname | client_port | backend_start | state | master_location | sent_location | write_location | flush_location | replay_location | sync_priority | sync_state
# Do not care about BACKUP and pg_basebackup connection
if ( (uc($data[9]) eq 'STREAMING') || (uc($data[4]) eq 'WALRECEIVER') ) {
# Identify the standby by client address, plus hostname when known
my $name = $data[5];
$data[6] =~ s/"//g;
$name .= " - $data[6]" if ($data[6]);
# First data line is its own baseline (deltas come out zero)
push(@start_vals, @data) if ($#start_vals < 0);
# Store interval between previous run
$all_stat_replication{$data[0]}{interval} = ($data[0] - $start_vals[0])/1000;
# Change of the master position since the previous sample.
# NOTE(review): getNumericalOffset presumably converts an LSN into a
# numeric byte offset — confirm against its definition elsewhere.
$all_stat_replication{$data[0]}{master_location} = &getNumericalOffset($data[10]) - &getNumericalOffset($start_vals[10]) if (! exists $all_stat_replication{$data[0]}{master_location});
# Skip standbys that reported no location at all
next if (!$data[14] && !$data[11] && !$data[12] && !$data[13]);
# Lag of each reported location behind the master position, clamped
# to zero
((&getNumericalOffset($data[10]) - &getNumericalOffset($data[14])) < 0) ? $tmp_val = 0 : $tmp_val = (&getNumericalOffset($data[10]) - &getNumericalOffset($data[14]));
$all_stat_replication{$data[0]}{$name}{replay_location} = $tmp_val;
((&getNumericalOffset($data[10]) - &getNumericalOffset($data[11])) < 0) ? $tmp_val = 0 : $tmp_val = (&getNumericalOffset($data[10]) - &getNumericalOffset($data[11]));
$all_stat_replication{$data[0]}{$name}{sent_location} = $tmp_val;
((&getNumericalOffset($data[10]) - &getNumericalOffset($data[12])) < 0) ? $tmp_val = 0 : $tmp_val = (&getNumericalOffset($data[10]) - &getNumericalOffset($data[12]));
$all_stat_replication{$data[0]}{$name}{write_location} = $tmp_val;
((&getNumericalOffset($data[10]) - &getNumericalOffset($data[13])) < 0) ? $tmp_val = 0 : $tmp_val = (&getNumericalOffset($data[10]) - &getNumericalOffset($data[13]));
$all_stat_replication{$data[0]}{$name}{flush_location} = $tmp_val;
@start_vals = ();
push(@start_vals, @data);
}
}
$curfh->close();
return $offset;
}
# Compute graphs of replication cluster statistics
sub pg_stat_replication_report
{
# Build and print the replication graphs (master WAL position change and
# per-standby lag) from the data collected by pg_stat_replication().
# Consumes and clears the global %all_stat_replication hash.
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my %xlog_stat = ();
# Timezone adjustment in milliseconds for the graph time axis
my $tz = ($STATS_TIMEZONE*3600*1000);
foreach my $t (sort {$a <=> $b} keys %all_stat_replication) {
# 'interval' and 'master_location' are bookkeeping keys; every other
# key is a standby connection name
foreach my $name (sort {$a cmp $b} keys %{$all_stat_replication{$t}}) {
next if ($name eq 'interval');
if ($name eq 'master_location') {
# A null interval cannot be turned into a rate
next if (!$all_stat_replication{$t}{interval});
$all_stat_replication{$t}{master_location} ||= 0;
$xlog_stat{master_location} .= '[' . ($t - $tz) . ',' . int(($all_stat_replication{$t}{master_location}||0)/$all_stat_replication{$t}{interval}) . '],';
next;
}
$xlog_stat{$name}{replay_location} .= '[' . ($t - $tz) . ',' . ($all_stat_replication{$t}{$name}{replay_location} || 0) . '],';
$xlog_stat{$name}{sent_location} .= '[' . ($t - $tz) . ',' . ($all_stat_replication{$t}{$name}{sent_location} || 0) . '],';
$xlog_stat{$name}{write_location} .= '[' . ($t - $tz) . ',' . ($all_stat_replication{$t}{$name}{write_location} || 0) . '],';
$xlog_stat{$name}{flush_location} .= '[' . ($t - $tz) . ',' . ($all_stat_replication{$t}{$name}{flush_location} || 0) . '],';
}
}
%all_stat_replication = ();
# return if (scalar keys %xlog_stat == 0);
foreach my $id (sort {$a <=> $b} keys %data_info) {
next if ($id ne $ID_ACTION);
next if ($data_info{$id}{name} ne $REAL_ACTION);
if ($data_info{$id}{name} eq 'cluster-xlog') {
# Drop the trailing comma left by the string building above
$xlog_stat{master_location} =~ s/,$// if (exists $xlog_stat{master_location});
print &jqplot_linegraph_array($IDX++, 'cluster-xlog', \%{$data_info{$id}}, '', $xlog_stat{master_location});
delete $xlog_stat{master_location} if (exists $xlog_stat{master_location});
} elsif ($data_info{$id}{name} eq 'cluster-replication') {
my $has_data = 0;
# One graph per standby host
foreach my $host (sort {$a cmp $b} keys %xlog_stat) {
next if ($host eq 'master_location');
$has_data = 1;
$xlog_stat{$host}{sent_location} =~ s/,$//;
$xlog_stat{$host}{write_location} =~ s/,$//;
$xlog_stat{$host}{flush_location} =~ s/,$//;
$xlog_stat{$host}{replay_location} =~ s/,$//;
print &jqplot_linegraph_array($IDX++, 'cluster-replication', \%{$data_info{$id}}, $host, $xlog_stat{$host}{sent_location}, $xlog_stat{$host}{write_location}, $xlog_stat{$host}{replay_location});
}
if (!$has_data) {
# No standby seen over the period: draw an empty graph
print &jqplot_linegraph_array($IDX++, 'cluster-replication', \%{$data_info{$id}});
}
}
}
}
# Compute statistics of pgbouncer
# Parse the pgbouncer pools data file starting at the given offset and
# accumulate per-timestamp, per-database, per-user connection counters
# into the global %all_pgbouncer_stats hash.
# Returns the new file offset to resume from on the next call.
sub pgbouncer_stats
{
	my ($input_dir, $file, $offset) = @_;

	return if (($ACTION eq 'home') || ($ACTION eq 'database-info'));

	# Column layout after timestamp/database/user:
	# timestamp|database|user|cl_active|cl_waiting|sv_active|sv_idle|sv_used|sv_tested|sv_login|maxwait
	my @metrics = ('cl_active', 'cl_waiting', 'sv_active', 'sv_idle', 'sv_used', 'sv_tested', 'sv_login', 'maxwait');

	# Load data from file, resuming where the previous call stopped
	my $curfh = open_filehdl("$input_dir/$file");
	$curfh->seek($offset, 0);
	while (my $line = <$curfh>) {
		$offset += length($line);
		my @row = split(/;/, $line);
		next if (!&normalize_line(\@row));
		# Skip the pgbouncer internal database
		next if ($row[1] eq 'pgbouncer');
		my ($ts, $dbname, $usr) = ($row[0], $row[1], $row[2]);
		for my $i (0 .. $#metrics) {
			$all_pgbouncer_stats{$ts}{$dbname}{$usr}{$metrics[$i]} += ($row[$i+3] || 0);
		}
	}
	$curfh->close();

	return $offset;
}
# Compute graphs of pgbouncer statistics
sub pgbouncer_stats_report
{
# Build and print the pgbouncer connection graphs from the data collected
# by pgbouncer_stats(). Two kinds of series are built: one aggregated per
# database (key "$db") and one per pool (key "$db/$usr").
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my %pgbouncer_stat = ();
my %total_pool = ();
# Timezone adjustment in milliseconds for the graph time axis
my $tz = ($STATS_TIMEZONE*3600*1000);
foreach my $t (sort {$a <=> $b} keys %all_pgbouncer_stats) {
foreach my $db (keys %{$all_pgbouncer_stats{$t}}) {
foreach my $usr (keys %{$all_pgbouncer_stats{$t}{$db}}) {
# Per-pool series, keyed "database/user"
$pgbouncer_stat{"$db/$usr"}{cl_active} .= '[' . ($t - $tz) . ',' . ($all_pgbouncer_stats{$t}{$db}{$usr}{cl_active} || 0) . '],';
$pgbouncer_stat{"$db/$usr"}{cl_waiting} .= '[' . ($t - $tz) . ',' . ($all_pgbouncer_stats{$t}{$db}{$usr}{cl_waiting} || 0) . '],';
$pgbouncer_stat{"$db/$usr"}{sv_active} .= '[' . ($t - $tz) . ',' . ($all_pgbouncer_stats{$t}{$db}{$usr}{sv_active} || 0) . '],';
$pgbouncer_stat{"$db/$usr"}{sv_idle} .= '[' . ($t - $tz) . ',' . ($all_pgbouncer_stats{$t}{$db}{$usr}{sv_idle} || 0) . '],';
$pgbouncer_stat{"$db/$usr"}{sv_used} .= '[' . ($t - $tz) . ',' . ($all_pgbouncer_stats{$t}{$db}{$usr}{sv_used} || 0) . '],';
$pgbouncer_stat{"$db/$usr"}{sv_tested} .= '[' . ($t - $tz) . ',' . ($all_pgbouncer_stats{$t}{$db}{$usr}{sv_tested} || 0) . '],';
$pgbouncer_stat{"$db/$usr"}{sv_login} .= '[' . ($t - $tz) . ',' . ($all_pgbouncer_stats{$t}{$db}{$usr}{sv_login} || 0) . '],';
$pgbouncer_stat{"$db/$usr"}{maxwait} .= '[' . ($t - $tz) . ',' . ($all_pgbouncer_stats{$t}{$db}{$usr}{maxwait} || 0) . '],';
# Per-database totals for this timestamp (deleted below once used)
$total_pool{$db}{cl_active} += $all_pgbouncer_stats{$t}{$db}{$usr}{cl_active};
$total_pool{$db}{cl_waiting} += $all_pgbouncer_stats{$t}{$db}{$usr}{cl_waiting};
$total_pool{$db}{sv_active} += $all_pgbouncer_stats{$t}{$db}{$usr}{sv_active};
$total_pool{$db}{sv_idle} += $all_pgbouncer_stats{$t}{$db}{$usr}{sv_idle};
$total_pool{$db}{sv_used} += $all_pgbouncer_stats{$t}{$db}{$usr}{sv_used};
$total_pool{$db}{sv_tested} += $all_pgbouncer_stats{$t}{$db}{$usr}{sv_tested};
$total_pool{$db}{sv_login} += $all_pgbouncer_stats{$t}{$db}{$usr}{sv_login};
$total_pool{$db}{maxwait} += $all_pgbouncer_stats{$t}{$db}{$usr}{maxwait};
}
# Aggregated per-database series built from this timestamp's totals
$pgbouncer_stat{$db}{cl_active} .= '[' . ($t - $tz) . ',' . ($total_pool{$db}{cl_active} || 0) . '],';
$pgbouncer_stat{$db}{cl_waiting} .= '[' . ($t - $tz) . ',' . ($total_pool{$db}{cl_waiting} || 0) . '],';
$pgbouncer_stat{$db}{sv_active} .= '[' . ($t - $tz) . ',' . ($total_pool{$db}{sv_active} || 0) . '],';
$pgbouncer_stat{$db}{sv_idle} .= '[' . ($t - $tz) . ',' . ($total_pool{$db}{sv_idle} || 0) . '],';
$pgbouncer_stat{$db}{sv_used} .= '[' . ($t - $tz) . ',' . ($total_pool{$db}{sv_used} || 0) . '],';
$pgbouncer_stat{$db}{sv_tested} .= '[' . ($t - $tz) . ',' . ($total_pool{$db}{sv_tested} || 0) . '],';
$pgbouncer_stat{$db}{sv_login} .= '[' . ($t - $tz) . ',' . ($total_pool{$db}{sv_login} || 0) . '],';
$pgbouncer_stat{$db}{maxwait} .= '[' . ($t - $tz) . ',' . ($total_pool{$db}{maxwait} || 0) . '],';
# $total_pool{'all'}{cl_active} += $total_pool{$db}{cl_active};
# $total_pool{'all'}{cl_waiting} += $total_pool{$db}{cl_waiting};
# $total_pool{'all'}{sv_active} += $total_pool{$db}{sv_active};
# $total_pool{'all'}{sv_idle} += $total_pool{$db}{sv_idle};
# $total_pool{'all'}{sv_used} += $total_pool{$db}{sv_used};
# $total_pool{'all'}{sv_tested} += $total_pool{$db}{sv_tested};
# $total_pool{'all'}{sv_login} += $total_pool{$db}{sv_login};
# $total_pool{'all'}{maxwait} += $total_pool{$db}{maxwait};
delete $total_pool{$db};
}
# $pgbouncer_stat{'all'}{cl_active} .= '[' . ($t - $tz) . ',' . ($total_pool{'all'}{cl_active} || 0) . '],';
# $pgbouncer_stat{'all'}{cl_waiting} .= '[' . ($t - $tz) . ',' . ($total_pool{'all'}{cl_waiting} || 0) . '],';
# $pgbouncer_stat{'all'}{sv_active} .= '[' . ($t - $tz) . ',' . ($total_pool{'all'}{sv_active} || 0) . '],';
# $pgbouncer_stat{'all'}{sv_idle} .= '[' . ($t - $tz) . ',' . ($total_pool{'all'}{sv_idle} || 0) . '],';
# $pgbouncer_stat{'all'}{sv_used} .= '[' . ($t - $tz) . ',' . ($total_pool{'all'}{sv_used} || 0) . '],';
# $pgbouncer_stat{'all'}{sv_tested} .= '[' . ($t - $tz) . ',' . ($total_pool{'all'}{sv_tested} || 0) . '],';
# $pgbouncer_stat{'all'}{sv_login} .= '[' . ($t - $tz) . ',' . ($total_pool{'all'}{sv_login} || 0) . '],';
# $pgbouncer_stat{'all'}{maxwait} .= '[' . ($t - $tz) . ',' . ($total_pool{'all'}{maxwait} || 0) . '],';
}
# Build graph dataset for all pgbouncer pool
foreach my $id (sort {$a <=> $b} keys %data_info) {
next if ($id ne $ID_ACTION);
foreach my $db (sort {$a cmp $b} keys %pgbouncer_stat) {
next if ($DATABASE && ($db ne $DATABASE));
next if (($db ne 'all') && ($#INCLUDE_DB >= 0) && (!grep($db =~ /^$_$/, @INCLUDE_DB)));
next if ($db eq 'all');
# Pool-level keys ("db/user") are handled in the inner loop below
next if ($db =~ /\//);
if ($data_info{$id}{name} eq 'pgbouncer-connections') {
# Drop trailing commas left by the string building above
$pgbouncer_stat{$db}{cl_active} =~ s/,$//;
$pgbouncer_stat{$db}{cl_waiting} =~ s/,$//;
$pgbouncer_stat{$db}{sv_active} =~ s/,$//;
$pgbouncer_stat{$db}{sv_idle} =~ s/,$//;
$pgbouncer_stat{$db}{sv_used} =~ s/,$//;
$pgbouncer_stat{$db}{maxwait} =~ s/,$//;
print &jqplot_linegraph_array($IDX++, 'pgbouncer-connections', \%{$data_info{$id}}, $db, $pgbouncer_stat{$db}{cl_active}, $pgbouncer_stat{$db}{cl_waiting}, $pgbouncer_stat{$db}{sv_active}, $pgbouncer_stat{$db}{sv_idle},$pgbouncer_stat{$db}{sv_used}, $pgbouncer_stat{$db}{maxwait});
# One extra graph per pool of this database
foreach my $pool (sort {$a cmp $b} keys %pgbouncer_stat) {
next if ($pool !~ /^$db\//);
$pgbouncer_stat{$pool}{cl_active} =~ s/,$//;
$pgbouncer_stat{$pool}{cl_waiting} =~ s/,$//;
$pgbouncer_stat{$pool}{sv_active} =~ s/,$//;
$pgbouncer_stat{$pool}{sv_idle} =~ s/,$//;
$pgbouncer_stat{$pool}{sv_used} =~ s/,$//;
$pgbouncer_stat{$pool}{maxwait} =~ s/,$//;
print &jqplot_linegraph_array($IDX++, 'pgbouncer-connections+', \%{$data_info{$id}}, $pool, $pgbouncer_stat{$pool}{cl_active}, $pgbouncer_stat{$pool}{cl_waiting}, $pgbouncer_stat{$pool}{sv_active}, $pgbouncer_stat{$pool}{sv_idle},$pgbouncer_stat{$pool}{sv_used}, $pgbouncer_stat{$pool}{maxwait});
}
print "\n";
}
}
}
}
# Read a unified-diff file and fill the given hashref with the diff
# content keyed by the epoch timestamp found on each '---' header line.
# A timestamp seen again replaces any previously stored content.
sub get_diff
{
	my ($file, $diff_hashref) = @_;

	return if (! -e $file);

	my $curfh = open_filehdl($file);
	my $key = '';
	while (my $line = <$curfh>) {
		chomp($line);
		$line =~ s/\r//;
		# '+++' header lines carry nothing we need
		next if ($line =~ /^\+\+\+/);
		if ($line =~ /^\-\-\-.*\s(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/) {
			# Convert the date on the '---' line to an epoch key,
			# adjusted by the configured timezone
			my $tz = ((0-$TIMEZONE)*3600);
			$key = &timegm_nocheck($6, $5, $4, $3, $2 - 1, $1 - 1900) + $tz;
			delete $diff_hashref->{$key} if (exists $diff_hashref->{$key});
			next;
		}
		# Accumulate diff body lines under the current timestamp
		$diff_hashref->{$key} .= "$line\n" if ($key);
	}
	$curfh->close();
}
# Print one entry per recorded configuration change, newest first.
sub show_diff
{
	my %diff = @_;

	for my $stamp (sort { $b cmp $a } keys %diff) {
		# NOTE(review): $date is computed but not referenced in the
		# visible output — presumably consumed by template markup not
		# shown in this chunk; confirm before removing.
		my $date = localtime($stamp);
		print qq{
};
	}
}
# Get content of pgbouncer.ini
# Read the pgbouncer configuration file content into %all_pgbouncer_ini
# and load its change history from the companion "$file.diff" file.
sub pgbouncer_ini
{
	my ($input_dir, $file) = @_;

	return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
	# FIX: test the file actually read below instead of the hardcoded
	# "pgbouncer.ini" name — $file may carry a different name (e.g. a
	# compressed variant), in which case the old check wrongly bailed out.
	return if (!-e "$input_dir/$file");

	%all_pgbouncer_ini = ();
	# Load data from file
	my $curfh = open_filehdl("$input_dir/$file");
	while (my $l = <$curfh>) {
		chomp($l);
		# Keep only parameter lines (they start with a lowercase letter),
		# skipping section headers, blank lines and full-line comments
		next if ($l !~ /^[a-z]/);
		# Strip trailing comments
		$l =~ s/\s*#.*//;
		$all_pgbouncer_ini{content} .= "$l\n";
	}
	$curfh->close();

	# Nothing kept from the file: no diff history to load either
	return if (!exists $all_pgbouncer_ini{content});

	# Load change on configuration file from diff files
	&get_diff("$input_dir/$file.diff", \%all_pgbouncer_ini_diff);
}
# Show relevant content of pgbouncer.ini
sub pgbouncer_ini_report
{
# Print the pgbouncer configuration section of the report, followed by
# the recorded configuration changes. Consumes and clears the globals
# %all_pgbouncer_ini and %all_pgbouncer_ini_diff.
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my $output = $all_pgbouncer_ini{content} || '';
if (!$output) {
$output = '
';
} else {
$output = "
\n$output
\n";
}
# NOTE(review): $output is built above but not referenced in the prints
# below — presumably it was interpolated by template markup not visible
# in this chunk; confirm before removing.
my $id = &get_data_id('cluster-pgbouncer', %data_info);
print qq{
-
$data_info{$id}{menu}
$data_info{$id}{description}
};
%all_pgbouncer_ini = ();
# Print the recorded configuration changes, newest first
&show_diff(%all_pgbouncer_ini_diff);
%all_pgbouncer_ini_diff = ();
print qq{
};
}
# Collect statistics about pgbouncer queries
sub pgbouncer_req_stats
{
# Parse the pgbouncer statistics data file starting at the given offset
# and accumulate per-timestamp, per-database query counter deltas into
# the global %all_pgbouncer_req_stats hash. Handles both the pre-1.8 and
# the 1.8+ SHOW STATS column layouts.
# Returns the new file offset to resume from on the next call.
my ($input_dir, $file, $offset) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my @start_vals = ();
# Load data from file
my $curfh = open_filehdl("$input_dir/$file");
$curfh->seek($offset,0);
while (my $l = <$curfh>) {
$offset += length($l);
my @data = split(/;/, $l);
next if (!&normalize_line(\@data));
# Skip the pgbouncer internal database
next if ($data[1] eq 'pgbouncer');
# First data line is its own baseline (all deltas come out zero).
# NOTE(review): deltas compare consecutive lines, which may belong to
# different databases — confirm the input file layout.
push(@start_vals, @data) if ($#start_vals < 0);
my $tmp_val = 0;
# timestamp|database|total_requests|total_received|total_sent|total_query_time|avg_req|avg_recv|avg_sent|avg_query
# Since 1.8:
# timestamp|database|total_xact_count|total_query_count|total_received|total_sent|total_xact_time|total_query_time|total_wait_time|avg_xact_count|avg_query_count|avg_recv|avg_sent|avg_xact_time|avg_query_time|avg_wait_time
if ($#data == 9) {
# Old (pre-1.8) layout: total_* deltas clamped to zero, avg_*
# columns summed as-is
(($data[2] - $start_vals[2]) < 0) ? $tmp_val = 0 : $tmp_val = ($data[2] - $start_vals[2]);
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{total_requests} += $tmp_val;
(($data[3] - $start_vals[3]) < 0) ? $tmp_val = 0 : $tmp_val = ($data[3] - $start_vals[3]);
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{total_received} += $tmp_val;
(($data[4] - $start_vals[4]) < 0) ? $tmp_val = 0 : $tmp_val = ($data[4] - $start_vals[4]);
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{total_sent} += $tmp_val;
(($data[5] - $start_vals[5]) < 0) ? $tmp_val = 0 : $tmp_val = ($data[5] - $start_vals[5]);
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{total_query_time} += $tmp_val;
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{avg_req} += $data[6];
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{avg_recv} += $data[7];
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{avg_sent} += $data[8];
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{avg_query} += $data[9];
} else {
# 1.8+ layout: total_query_count stands in for total_requests
(($data[3] - $start_vals[3]) < 0) ? $tmp_val = 0 : $tmp_val = ($data[3] - $start_vals[3]);
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{total_requests} += $tmp_val;
(($data[4] - $start_vals[4]) < 0) ? $tmp_val = 0 : $tmp_val = ($data[4] - $start_vals[4]);
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{total_received} += $tmp_val;
(($data[5] - $start_vals[5]) < 0) ? $tmp_val = 0 : $tmp_val = ($data[5] - $start_vals[5]);
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{total_sent} += $tmp_val;
(($data[7] - $start_vals[7]) < 0) ? $tmp_val = 0 : $tmp_val = ($data[7] - $start_vals[7]);
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{total_query_time} += $tmp_val;
(($data[8] - $start_vals[8]) < 0) ? $tmp_val = 0 : $tmp_val = ($data[8] - $start_vals[8]);
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{total_wait_time} += $tmp_val;
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{avg_query_count} += $data[10];
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{avg_recv} += $data[11];
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{avg_sent} += $data[12];
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{avg_query_time} += $data[14];
$all_pgbouncer_req_stats{$data[0]}{$data[1]}{avg_wait_time} += $data[15];
}
@start_vals = ();
push(@start_vals, @data);
}
$curfh->close();
return $offset;
}
# Show report about pgbouncer queries
# Render pgbouncer per-database query statistics graphs.
# Flattens %all_pgbouncer_req_stats (timestamp -> db -> counter) into
# jqplot time series strings, then prints the graph matching $ID_ACTION.
# Handles the pgbouncer < 1.8 counter renames (avg_query -> avg_query_time,
# avg_req -> avg_query_count) by concatenating the old series in front of
# the new one so mixed-version data files still produce a single curve.
sub pgbouncer_req_stats_report
{
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my %pgbouncer_stat = ();
# Graph timestamps are shifted to the configured timezone (milliseconds)
my $tz = ($STATS_TIMEZONE*3600*1000);
# NOTE: the inner "my $db" deliberately shadows the $db parameter here;
# all databases found in the collected data are flattened.
foreach my $t (sort {$a <=> $b} keys %all_pgbouncer_req_stats) {
foreach my $db (keys %{$all_pgbouncer_req_stats{$t}}) {
foreach my $k (keys %{$all_pgbouncer_req_stats{$t}{$db}}) {
$pgbouncer_stat{$db}{$k} .= '[' . ($t - $tz) . ',' . $all_pgbouncer_req_stats{$t}{$db}{$k} . '],';
}
}
}
foreach my $id (sort {$a <=> $b} keys %data_info) {
next if ($id ne $ID_ACTION);
foreach my $db (sort {$a cmp $b} keys %pgbouncer_stat) {
next if ($DATABASE && ($db ne $DATABASE));
next if ($db eq 'all');
next if (($db ne 'all') && ($#INCLUDE_DB >= 0) && (!grep($db =~ /^$_$/, @INCLUDE_DB)));
if ($data_info{$id}{name} eq 'pgbouncer-duration') {
# Assure backward compatibility with old data or cache file with
# pgbouncer < 1.8 avg_query have been renamed into avg_query_time
if (exists $pgbouncer_stat{$db}{avg_query_time}) {
# Prepend the legacy series, then make both keys hold the merged data
if (exists $pgbouncer_stat{$db}{avg_query}) {
$pgbouncer_stat{$db}{avg_query} .= $pgbouncer_stat{$db}{avg_query_time};
$pgbouncer_stat{$db}{avg_query_time} = $pgbouncer_stat{$db}{avg_query};
}
$pgbouncer_stat{$db}{avg_query_time} =~ s/,$//;
$data_info{$id}{legends} = ['avg_query_time'];
print &jqplot_linegraph_array($IDX++, 'pgbouncer-duration', \%{$data_info{$id}}, $db, $pgbouncer_stat{$db}{avg_query_time});
} else {
$pgbouncer_stat{$db}{avg_query} =~ s/,$//;
print &jqplot_linegraph_array($IDX++, 'pgbouncer-duration', \%{$data_info{$id}}, $db, $pgbouncer_stat{$db}{avg_query});
}
} elsif ($data_info{$id}{name} eq 'pgbouncer-number') {
# Assure backward compatibility with old data or cache file with
# pgbouncer < 1.8 avg_req have been renamed into avg_query_count
if (exists $pgbouncer_stat{$db}{avg_query_count}) {
if (exists $pgbouncer_stat{$db}{avg_req}) {
$pgbouncer_stat{$db}{avg_req} .= $pgbouncer_stat{$db}{avg_query_count};
$pgbouncer_stat{$db}{avg_query_count} = $pgbouncer_stat{$db}{avg_req};
}
$pgbouncer_stat{$db}{avg_query_count} =~ s/,$//;
$data_info{$id}{legends} = ['avg_query_count'];
print &jqplot_linegraph_array($IDX++, 'pgbouncer-number', \%{$data_info{$id}}, $db, $pgbouncer_stat{$db}{avg_query_count});
} else {
$pgbouncer_stat{$db}{avg_req} =~ s/,$//;
print &jqplot_linegraph_array($IDX++, 'pgbouncer-number', \%{$data_info{$id}}, $db, $pgbouncer_stat{$db}{avg_req});
}
} elsif ($data_info{$id}{name} eq 'pgbouncer-wait-total') {
$pgbouncer_stat{$db}{total_wait_time} =~ s/,$//;
print &jqplot_linegraph_array($IDX++, 'pgbouncer-wait-total', \%{$data_info{$id}}, $db, $pgbouncer_stat{$db}{total_wait_time});
} elsif ($data_info{$id}{name} eq 'pgbouncer-wait-average') {
$pgbouncer_stat{$db}{avg_wait_time} =~ s/,$//;
print &jqplot_linegraph_array($IDX++, 'pgbouncer-wait-average', \%{$data_info{$id}}, $db, $pgbouncer_stat{$db}{avg_wait_time});
}
}
}
}
# Compute graphs of object size statistics
# Load per-relation size statistics from pg_class_size.csv into
# %all_class_size (db -> relkind -> "schema.relname" -> size/tuples/width),
# or, for the 'database-info' action, just count objects per relkind into
# $OVERALL_STATS{'class'}.
sub pg_class_size
{
my ($input_dir, $file) = @_;
%all_class_size = ();
my %total_class = ();
# Load data from file
my $curfh = open_filehdl("$input_dir/$file");
while (my $l = <$curfh>) {
chomp($l);
next if (!$l);
my @data = split(/;/, $l);
next if (!&normalize_line(\@data));
# Sanity check on the file format: field 4 must be a one-letter relkind.
# NOTE(review): the message mentions the "second field"/database name but
# the test is on $data[4] — verify wording against upstream.
if ($data[4] !~ /^[a-zA-Z]$/) {
print STDERR "WARNING: incompatible type of file pg_class_size.csv, the second field should be the database name\n";
last;
}
# timestamp | dbname | nspname | relname | relkind | reltuples | relpages | relsize
next if (($#INCLUDE_DB >= 0) && (!grep($data[1] =~ /^$_$/, @INCLUDE_DB)));
# Old format: derive size from relpages (8kB blocks); new format has relsize
my $size = $data[6]*8192;
$size = $data[7] if ($#data == 7);
if ($ACTION eq 'database-info') {
# Only the object inventory is needed for this action
$total_class{$data[1]}{$data[4]}{"$data[2].$data[3]"} = '';
} else {
$all_class_size{$data[1]}{$data[4]}{"$data[2].$data[3]"}{size} = $size;
$all_class_size{$data[1]}{$data[4]}{"$data[2].$data[3]"}{tuples} = $data[5];
if ($data[5] > 0) {
# Average row width = total size / estimated tuple count
$all_class_size{$data[1]}{$data[4]}{"$data[2].$data[3]"}{width} = sprintf("%.2f", $size/$data[5]);
} else {
$all_class_size{$data[1]}{$data[4]}{"$data[2].$data[3]"}{width} = '-';
}
}
}
$curfh->close();
if ($ACTION eq 'database-info') {
foreach my $db (sort keys %total_class) {
next if ($DATABASE && ($db ne $DATABASE));
next if (($db ne 'all') && ($#INCLUDE_DB >= 0) && (!grep($db =~ /^$_$/, @INCLUDE_DB)));
# Count the number of object
foreach my $k (sort keys %RELKIND) {
if (exists $total_class{$db}{$k}) {
$OVERALL_STATS{'class'}{$db}{$k} = scalar keys %{$total_class{$db}{$k}};
} else {
$OVERALL_STATS{'class'}{$db}{$k} = 0;
}
}
}
%total_class = ();
}
}
# Compute graphs of object size statistics
# Print the HTML table of object sizes for one database: tables (relkind
# 'r'/'t') for the 'table-size' report, indexes (relkind 'i') for
# 'index-size'. Rows are sorted by decreasing size. Consumes and clears
# %all_class_size.
# NOTE(review): the qq{}/string bodies here look like table markup that was
# stripped during extraction — verify against the upstream file.
sub pg_class_size_report
{
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
return if (!$db);
my %data = ();
foreach my $id (sort {$a <=> $b} keys %data_info) {
next if ($id ne $ID_ACTION);
next if ($data_info{$id}{name} !~ /^(table|index)-/);
my $table_header = '';
my $kind = '';
if ($data_info{$id}{name} eq 'table-size') {
$kind = 'tables';
$table_header = qq{
Object name |
Size |
Tuples |
Avg width |
};
} elsif ($data_info{$id}{name} eq 'index-size') {
$kind = 'indexes';
$table_header = qq{
Object name |
Size |
Tuples |
Avg width |
};
}
# No data collected at all: replace the header with an empty placeholder
if (scalar keys %{$all_class_size{$db}} == 0) {
$table_header = qq{
| };
}
print qq{
-
$data_info{$id}{menu} on $db database $kind
$data_info{$id}{description}
$table_header
};
my $found_table_stat = 0;
foreach my $k (sort keys %{$all_class_size{$db}}) {
# Restrict relkinds to the ones relevant for the current report
next if (($k ne 't') && ($k ne 'r') && ($data_info{$id}{name} eq 'table-size'));
next if (($k ne 'i') && ($data_info{$id}{name} eq 'index-size'));
my $colspan = ' colspan="5"';
# Largest objects first
foreach my $tb (sort {$all_class_size{$db}{$k}{$b}{size} <=> $all_class_size{$db}{$k}{$a}{size} } keys %{$all_class_size{$db}{$k}}) {
next if (!$all_class_size{$db}{$k}{$tb}{size} && !$all_class_size{$db}{$k}{$tb}{tuples});
next if (($#INCLUDE_TB >= 0) && !grep(/^$tb$/, @INCLUDE_TB));
my $table_data = '';
if ($data_info{$id}{name} =~ /^(table|index)-size$/) {
$found_table_stat = 1;
foreach ('size','tuples','width') {
$all_class_size{$db}{$k}{$tb}{$_} ||= 0;
}
$table_data = "$tb | " . &pretty_print_size($all_class_size{$db}{$k}{$tb}{size}) . " | " . int($all_class_size{$db}{$k}{$tb}{tuples}) . " | $all_class_size{$db}{$k}{$tb}{width} |
\n";
}
if ($table_data) {
print $table_data;
}
}
if (!$found_table_stat) {
print qq{ |
};
}
}
print qq{
};
}
%all_class_size = ();
}
# Compute graphs of locks statistics
# Incrementally parse the lock statistics CSV starting at $offset and
# accumulate counters per timestamp/database/label into %all_stat_locks.
# Returns the new file offset so the next run can resume from there.
sub pg_stat_locks
{
	my ($input_dir, $file, $offset) = @_;

	# Lock statistics are not used by these actions
	return if (($ACTION eq 'home') || ($ACTION eq 'database-info'));

	my $fh = open_filehdl("$input_dir/$file");
	$fh->seek($offset, 0);
	while (my $line = <$fh>) {
		$offset += length($line);
		my @data = split(/;/, $line);
		next if (!&normalize_line(\@data));
		# timestamp|database|label|(type|mode|granted)|count
		next if (($#INCLUDE_DB >= 0) && (!grep($data[1] =~ /^$_$/, @INCLUDE_DB)));
		# Translate the boolean "granted" column into readable series names
		if ($data[3] eq 't') {
			$data[3] = 'granted';
		} elsif ($data[3] eq 'f') {
			$data[3] = 'waiting';
		}
		$all_stat_locks{$data[0]}{$data[1]}{$data[2]}{$data[3]} += $data[4];
	}
	$fh->close();

	return $offset;
}
# Compute graphs of locks statistics
# Render the lock statistics graphs (by type, by mode, by granted state)
# for one database from %all_stat_locks, then clear it.
#
# FIX: %legends was never populated from the collected data, so the inner
# series-building loop always iterated an empty list and every graph came
# out empty; the 'waiting' guard was also inverted (it pushed 'waiting'
# only when it was ALREADY present). We now seed the legend list from the
# keys actually seen and add a 'waiting' placeholder when only 'granted'
# was observed, so both curves are always drawn together.
sub pg_stat_locks_report
{
	my ($src_base, $db, %data_info) = @_;
	return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
	return if (!$db);

	my %locks_stat = ();
	my %legends = ();
	my $tz = ($STATS_TIMEZONE*3600*1000);
	foreach my $t (keys %all_stat_locks) {
		foreach my $lbl (keys %{$all_stat_locks{$t}{$db}}) {
			# Register every series (lock type / mode / granted state) seen so far
			foreach my $k (sort keys %{$all_stat_locks{$t}{$db}{$lbl}}) {
				push(@{$legends{$db}{$lbl}}, $k) if (!grep(/^$k$/, @{$legends{$db}{$lbl}}));
			}
			# Always draw the 'waiting' counterpart when 'granted' is present
			push(@{$legends{$db}{$lbl}}, 'waiting') if ( !grep(/^waiting$/, @{$legends{$db}{$lbl}}) && (${$legends{$db}{$lbl}}[0] eq 'granted') );
			foreach my $k (@{$legends{$db}{$lbl}}) {
				$locks_stat{$db}{$lbl}{$k} .= '[' . ($t - $tz) . ',' . ($all_stat_locks{$t}{$db}{$lbl}{$k}||0) . '],';
			}
		}
	}
	%all_stat_locks = ();
	foreach my $id (sort {$a <=> $b} keys %data_info) {
		next if ($id ne $ID_ACTION);
		my @graph_data = ();
		if ($data_info{$id}{name} eq 'database-lock-types') {
			foreach my $k (sort keys %{$locks_stat{$db}{lock_type}}) {
				$locks_stat{$db}{lock_type}{$k} =~ s/,$//;
				push(@{$data_info{$id}{legends}}, $k);
				push(@graph_data, $locks_stat{$db}{lock_type}{$k});
			}
			print &jqplot_linegraph_array($IDX++, 'database-lock-types', \%{$data_info{$id}}, $db, @graph_data);
		} elsif ($data_info{$id}{name} eq 'database-lock-modes') {
			foreach my $k (sort keys %{$locks_stat{$db}{lock_mode}}) {
				$locks_stat{$db}{lock_mode}{$k} =~ s/,$//;
				push(@{$data_info{$id}{legends}}, $k);
				push(@graph_data, $locks_stat{$db}{lock_mode}{$k});
			}
			print &jqplot_linegraph_array($IDX++, 'database-lock-modes', \%{$data_info{$id}}, $db, @graph_data);
		} elsif ($data_info{$id}{name} eq 'database-lock-granted') {
			foreach my $k (sort keys %{$locks_stat{$db}{lock_granted}}) {
				$locks_stat{$db}{lock_granted}{$k} =~ s/,$//;
				push(@{$data_info{$id}{legends}}, $k);
				push(@graph_data, $locks_stat{$db}{lock_granted}{$k});
			}
			print &jqplot_linegraph_array($IDX++, 'database-lock-granted', \%{$data_info{$id}}, $db, @graph_data);
		}
	}
}
# Compute statistics about unused index
# Load the list of unused indexes per database from the CSV snapshot.
# Each entry keeps [schema, table, index, index DDL] in file order.
sub pg_stat_unused_indexes
{
	my ($input_dir, $file) = @_;

	return if (($ACTION eq 'home') || ($ACTION eq 'database-info'));

	%all_stat_unused_indexes = ();
	my $fh = open_filehdl("$input_dir/$file");
	while (my $line = <$fh>) {
		chomp($line);
		next if (!$line);
		my @data = split(/;/, $line);
		next if (!&normalize_line(\@data));
		# timestamp | dbname | schemaname | relname | indexrelname | index_code
		next if (($#INCLUDE_DB >= 0) && (!grep($data[1] =~ /^$_$/, @INCLUDE_DB)));
		push(@{$all_stat_unused_indexes{$data[1]}}, [ @data[2 .. 5] ]);
	}
	$fh->close();
}
# Compute report about unused index
# Print the "unused indexes" table for one database, one row per index
# (schema, table, index name, DDL). Consumes and clears
# %all_stat_unused_indexes.
sub pg_stat_unused_indexes_report
{
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
return if (!$db);
my $id = &get_data_id('unused-index', %data_info);
my $table_header = qq{
Schema |
Table |
Index |
Code |
};
# Replace the header with an empty placeholder when there is nothing to show
if (!exists $all_stat_unused_indexes{$db} || $#{$all_stat_unused_indexes{$db}} < 0) {
$table_header = qq{
| };
}
print qq{
-
$data_info{$id}{menu} on $db database
$data_info{$id}{description}
$table_header
};
foreach my $r (@{$all_stat_unused_indexes{$db}}) {
print '', join(' | ', @$r), " |
\n";
}
print qq{
};
%all_stat_unused_indexes = ();
}
# Compute statistics about redundant index
# Load the list of redundant indexes per database from the CSV snapshot.
# Each entry keeps [contained index DDL, containing index DDL].
sub pg_stat_redundant_indexes
{
	my ($input_dir, $file) = @_;

	return if (($ACTION eq 'home') || ($ACTION eq 'database-info'));

	%all_stat_redundant_indexes = ();
	my $fh = open_filehdl("$input_dir/$file");
	while (my $line = <$fh>) {
		chomp($line);
		next if (!$line);
		my @data = split(/;/, $line);
		next if (!&normalize_line(\@data));
		# Do not report indexes when one is partial and not the other one
		# otherwise keep them for user recheck.
		next if (grep(/\bWHERE\b/i, $data[2], $data[3]) == 1);
		# timestamp | dbname | contained | containing
		next if (($#INCLUDE_DB >= 0) && (!grep($data[1] =~ /^$_$/, @INCLUDE_DB)));
		push(@{$all_stat_redundant_indexes{$data[1]}}, [ @data[2, 3] ]);
	}
	$fh->close();
}
}
# Compute report about redundant index
# Print the "redundant indexes" table for one database, one row per pair
# (contained index, containing index). Consumes and clears
# %all_stat_redundant_indexes.
sub pg_stat_redundant_indexes_report
{
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
return if (!$db);
my $id = &get_data_id('redundant-index', %data_info);
my $table_header = qq{
Contained |
Containing |
};
# Replace the header with an empty placeholder when there is nothing to show
if (!exists $all_stat_redundant_indexes{$db} || $#{$all_stat_redundant_indexes{$db}} < 0) {
$table_header = qq{
| };
}
print qq{
-
$data_info{$id}{menu} on $db database
$data_info{$id}{description}
$table_header
};
foreach my $r (@{$all_stat_redundant_indexes{$db}}) {
print '', join(' | ', @$r), " |
\n";
}
print qq{
};
%all_stat_redundant_indexes = ();
}
# Compute statistics about missing index
# Load the list of foreign keys lacking an index, per database.
# Each entry keeps [table name, suggested CREATE INDEX DDL].
sub pg_stat_missing_fkindexes
{
	my ($input_dir, $file) = @_;

	return if (($ACTION eq 'home') || ($ACTION eq 'database-info'));

	%all_stat_missing_fkindexes = ();
	my $fh = open_filehdl("$input_dir/$file");
	while (my $line = <$fh>) {
		chomp($line);
		next if (!$line);
		my @data = split(/;/, $line);
		next if (!&normalize_line(\@data));
		# timestamp | dbname | relname | ddl
		next if (($#INCLUDE_DB >= 0) && (!grep($data[1] =~ /^$_$/, @INCLUDE_DB)));
		push(@{$all_stat_missing_fkindexes{$data[1]}}, [ @data[2, 3] ]);
	}
	$fh->close();
}
# Compute report about missing index
# Print the "missing foreign-key indexes" table for one database, one row
# per (table, suggested DDL). Consumes and clears %all_stat_missing_fkindexes.
sub pg_stat_missing_fkindexes_report
{
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
return if (!$db);
my $id = &get_data_id('missing-index', %data_info);
my $table_header = qq{
Table |
Missing index |
};
# Replace the header with an empty placeholder when there is nothing to show
if (!exists $all_stat_missing_fkindexes{$db} || $#{$all_stat_missing_fkindexes{$db}} < 0) {
$table_header = qq{
| };
}
print qq{
-
$data_info{$id}{menu} on $db database
$data_info{$id}{description}
$table_header
};
foreach my $r (@{$all_stat_missing_fkindexes{$db}}) {
print '', join(' | ', @$r), " |
\n";
}
print qq{
};
%all_stat_missing_fkindexes = ();
}
# Compute statistics about indexes count
# Load the per-table index counts (db -> schema -> table -> count) from
# the CSV snapshot into %all_stat_count_indexes.
#
# FIX: this was the only CSV loader that skipped the &normalize_line()
# validation used by every sibling loader, so malformed or header lines
# could be indexed by $data[1] and pollute the hash. The guard is now
# applied consistently.
sub pg_stat_count_indexes
{
	my ($input_dir, $file) = @_;
	return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
	%all_stat_count_indexes = ();
	# Load data from file
	my $curfh = open_filehdl("$input_dir/$file");
	while (my $l = <$curfh>) {
		chomp($l);
		next if (!$l);
		my @data = split(/;/, $l);
		# Validate/normalize the record like every other loader does
		next if (!&normalize_line(\@data));
		next if (($#INCLUDE_DB >= 0) && (!grep($data[1] =~ /^$_$/, @INCLUDE_DB)));
		# timestamp | dbname | schema | table | count
		$all_stat_count_indexes{$data[1]}{$data[2]}{$data[3]} = $data[4];
	}
	$curfh->close();
}
# Compute report about table without indexes or with too many indexes
# Print two tables for one database: tables with too many indexes
# (count > $MAX_INDEXES) and tables with no index at all. Consumes and
# clears %all_stat_count_indexes.
#
# FIX: the first section's filter was inverted — "next if count >
# $MAX_INDEXES" skipped exactly the over-indexed tables the section is
# meant to report and listed everything else instead. It now skips tables
# whose index count is within the allowed maximum, mirroring the
# zero-index section below which skips tables with count > 0.
sub pg_stat_count_indexes_report
{
	my ($src_base, $dbname, %data_info) = @_;
	return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
	return if (!$dbname);
	foreach my $db (sort keys %all_stat_count_indexes)
	{
		next if ($db ne $dbname);
		next if (($db ne 'all') && ($#INCLUDE_DB >= 0) && (!grep($db =~ /^$_$/, @INCLUDE_DB)));
		my $id = &get_data_id('count-index', %data_info);
		print qq{
-
$data_info{$id}{menu} on $db database
$data_info{$id}{description}
Schema |
table |
Number of indexes |
};
		foreach my $s (sort keys %{$all_stat_count_indexes{$db}}) {
			foreach my $t (sort keys %{$all_stat_count_indexes{$db}{$s}}) {
				# Only report tables exceeding the configured maximum
				next if ($all_stat_count_indexes{$db}{$s}{$t} <= $MAX_INDEXES);
				print "$s | $t | $all_stat_count_indexes{$db}{$s}{$t} |
\n";
			}
		}
		print qq{
};
		$id = &get_data_id('zero-index', %data_info);
		print qq{
$data_info{$id}{menu} on $db database
$data_info{$id}{description}
Schema |
table |
};
		foreach my $s (sort keys %{$all_stat_count_indexes{$db}}) {
			foreach my $t (sort keys %{$all_stat_count_indexes{$db}{$s}}) {
				# Only report tables without any index
				next if ($all_stat_count_indexes{$db}{$s}{$t} > 0);
				print "$s | $t |
\n";
			}
		}
		print qq{
};
	}
	%all_stat_count_indexes = ();
}
# Compute statistics about extended statistics
# Load extended statistics (pg_statistic_ext) definitions per database.
# Each entry keeps [stats schema, stats name, kinds, attnames, table
# schema, table name].
#
# FIX: the file was opened with the undeclared variable $in_dir instead of
# the $input_dir parameter, so the path was wrong/undefined (and would not
# compile under "use strict" unless $in_dir happened to be a global).
sub pg_stat_ext
{
	my ($input_dir, $file) = @_;
	return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
	%all_stat_extended_statistics = ();
	# Load data from file (was: "$in_dir/$file" — undeclared variable)
	my $curfh = open_filehdl("$input_dir/$file");
	while (my $l = <$curfh>) {
		chomp($l);
		next if (!$l);
		my @data = split(/;/, $l);
		next if (($#INCLUDE_DB >= 0) && (!grep($data[1] =~ /^$_$/, @INCLUDE_DB)));
		# timestamp | dbname | schemaname | tablename | stats_schemaname | stats_name | stats_owner | attnames | kinds
		# NOTE(review): unlike sibling loaders this one does not call
		# &normalize_line() — confirm whether that is intentional.
		push(@{$all_stat_extended_statistics{$data[1]}}, [ ($data[4], $data[5], $data[8], $data[7], $data[2], $data[3]) ] );
	}
	$curfh->close();
}
# Compute report about extended statistics
# Print the extended-statistics table for one database: one row per
# statistics object with a reconstructed CREATE STATISTICS command.
# Consumes and clears %all_stat_extended_statistics.
sub pg_stat_ext_report
{
my ($src_base, $dbname, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
return if (!$dbname);
my $id = &get_data_id('table-extended', %data_info);
foreach my $db (sort keys %all_stat_extended_statistics) {
next if ($db ne $dbname);
next if (($db ne 'all') && ($#INCLUDE_DB >= 0) && (!grep($db =~ /^$_$/, @INCLUDE_DB)));
print qq{
$data_info{$id}{menu} on $db database
$data_info{$id}{description}
Table |
Extended Statistic |
};
# Map pg_statistic_ext.stxkind letters to human-readable kind names
my %stat_kinds = ('f' => 'dependencies', 'd' => 'ndistinct', 'm' => 'mcv');
# Sort by qualified table name; $r = [stats schema, stats name, kinds,
# attnames, table schema, table name]
foreach my $r (sort {"$a->[4].$a->[5]" cmp "$b->[4].$b->[5]"} @{$all_stat_extended_statistics{$db}}) {
# Strip the array braces from the kinds column ({f,d} -> f,d)
$r->[2] =~ s/\{//;
$r->[2] =~ s/\}//;
my @skind = split(',', $r->[2]);
map { s/(.*)/$stat_kinds{$1}/; } @skind;
# Strip the array braces from the attribute names column
$r->[3] =~ s/\{//;
$r->[3] =~ s/\}//;
print "$r->[4].$r->[5] | CREATE STATISTICS $r->[0].$r->[1] (", join(',', @skind), ") ON $r->[3] FROM $r->[4].$r->[5]; |
\n";
}
print qq{
};
}
%all_stat_extended_statistics = ();
}
# Get relevant content of postgresql.conf
# Collect the active (uncommented) directives from postgresql.conf into
# $all_postgresql_conf{content}, then load any recorded changes from the
# companion .diff file.
sub postgresql_conf
{
	my ($input_dir, $file) = @_;

	return if (($ACTION eq 'home') || ($ACTION eq 'database-info'));

	%all_postgresql_conf = ();
	# Keep only "directive = value" lines, stripping trailing comments
	my $fh = open_filehdl("$input_dir/$file");
	while (my $line = <$fh>) {
		chomp($line);
		next if ($line !~ /^[a-z]/);
		$line =~ s/\s*#.*//;
		$all_postgresql_conf{content} .= "$line\n";
	}
	$fh->close();

	return if (!exists $all_postgresql_conf{content});

	# Load change on configuration file from diff files
	&get_diff("$input_dir/$file.diff", \%all_postgresql_conf_diff);
}
# Show content of postgresql.conf
# Print the postgresql.conf content section followed by the recorded
# configuration changes. Consumes and clears %all_postgresql_conf and
# %all_postgresql_conf_diff.
sub postgresql_conf_report
{
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my $output = $all_postgresql_conf{content} || '';
# Wrap the content, or emit an empty placeholder when nothing was collected
if (!$output) {
$output = '
';
} else {
$output = "
\n$output
\n";
}
my $id = &get_data_id('cluster-pgconf', %data_info);
# NOTE(review): $output is built but not interpolated in the print below —
# looks like markup lost in extraction; verify against upstream.
print qq{
-
$data_info{$id}{menu}
$data_info{$id}{description}
};
%all_postgresql_conf = ();
&show_diff(%all_postgresql_conf_diff);
%all_postgresql_conf_diff = ();
print qq{
};
}
# Get relevant content of recovery.conf
# Collect the active (uncommented) directives from recovery.conf into
# $all_recovery_conf{content}, then load any recorded changes from the
# companion .diff file.
sub recovery_conf
{
	my ($input_dir, $file) = @_;

	return if (($ACTION eq 'home') || ($ACTION eq 'database-info'));

	%all_recovery_conf = ();
	# Keep only "directive = value" lines, stripping trailing comments
	my $fh = open_filehdl("$input_dir/$file");
	while (my $line = <$fh>) {
		chomp($line);
		next if ($line !~ /^[a-z]/);
		$line =~ s/\s*#.*//;
		$all_recovery_conf{content} .= "$line\n";
	}
	$fh->close();

	return if (!exists $all_recovery_conf{content});

	# Load change on configuration file from diff files
	&get_diff("$input_dir/$file.diff", \%all_recovery_conf_diff);
}
# Show content of recovery.conf
# Print the recovery.conf content section followed by the recorded
# configuration changes. Consumes and clears %all_recovery_conf and
# %all_recovery_conf_diff.
sub recovery_conf_report
{
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my $output = $all_recovery_conf{content} || '';
# Wrap the content, or emit an empty placeholder when nothing was collected
if (!$output) {
$output = '
';
} else {
$output = "
\n$output
\n";
}
my $id = &get_data_id('cluster-recoveryconf', %data_info);
# NOTE(review): $output is built but not interpolated in the print below —
# looks like markup lost in extraction; verify against upstream.
print qq{
-
$data_info{$id}{menu}
$data_info{$id}{description}
};
%all_recovery_conf = ();
&show_diff(%all_recovery_conf_diff);
%all_recovery_conf_diff = ();
print qq{
};
}
# Show relevant content of postgresql.auto.conf
# Collect the active (uncommented) directives from postgresql.auto.conf
# into $all_postgresql_auto_conf{content}, then load any recorded changes
# from the companion .diff file.
sub postgresql_auto_conf
{
	my ($input_dir, $file) = @_;

	return if (($ACTION eq 'home') || ($ACTION eq 'database-info'));

	%all_postgresql_auto_conf = ();
	# Keep only "directive = value" lines, stripping trailing comments
	my $fh = open_filehdl("$input_dir/$file");
	while (my $line = <$fh>) {
		chomp($line);
		next if ($line !~ /^[a-z]/);
		$line =~ s/\s*#.*//;
		$all_postgresql_auto_conf{content} .= "$line\n";
	}
	$fh->close();

	return if (!exists $all_postgresql_auto_conf{content});

	# Load change on configuration file from diff files
	&get_diff("$input_dir/$file.diff", \%all_postgresql_auto_conf_diff);
}
# Show relevant content of postgresql.auto.conf
# Print the postgresql.auto.conf content section followed by the recorded
# configuration changes. Consumes and clears %all_postgresql_auto_conf and
# %all_postgresql_auto_conf_diff.
sub postgresql_auto_conf_report
{
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my $output = $all_postgresql_auto_conf{content} || '';
# Wrap the content, or emit an empty placeholder when nothing was collected
if (!$output) {
$output = '
';
} else {
$output = "
\n$output
\n";
}
my $id = &get_data_id('cluster-alterconf', %data_info);
# NOTE(review): $output is built but not interpolated in the print below —
# looks like markup lost in extraction; verify against upstream.
print qq{
-
$data_info{$id}{menu}
$data_info{$id}{description}
};
%all_postgresql_auto_conf = ();
&show_diff(%all_postgresql_auto_conf_diff);
%all_postgresql_auto_conf_diff = ();
print qq{
};
}
# Get relevant content of pg_hba.conf
# Collect the active (uncommented) rules from pg_hba.conf into
# $all_pg_hba_conf{content}, then load any recorded changes from the
# companion .diff file.
sub pg_hba_conf
{
	my ($input_dir, $file) = @_;

	return if (($ACTION eq 'home') || ($ACTION eq 'database-info'));

	%all_pg_hba_conf = ();
	# Keep only rule lines, stripping trailing comments
	my $fh = open_filehdl("$input_dir/$file");
	while (my $line = <$fh>) {
		chomp($line);
		next if ($line !~ /^[a-z]/);
		$line =~ s/\s*#.*//;
		$all_pg_hba_conf{content} .= "$line\n";
	}
	$fh->close();

	return if (!exists $all_pg_hba_conf{content});

	# Load change on configuration file from diff files
	&get_diff("$input_dir/$file.diff", \%all_pg_hba_conf_diff);
}
# Show content of pg_hba.conf
# Print the pg_hba.conf content section followed by the recorded
# configuration changes. Consumes and clears %all_pg_hba_conf and
# %all_pg_hba_conf_diff.
sub pg_hba_conf_report
{
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my $output = $all_pg_hba_conf{content} || '';
# Wrap the content, or emit an empty placeholder when nothing was collected
if (!$output) {
$output = '
';
} else {
$output = "
\n$output
\n";
}
my $id = &get_data_id('cluster-pghba', %data_info);
# NOTE(review): $output is built but not interpolated in the print below —
# looks like markup lost in extraction; verify against upstream.
print qq{
-
$data_info{$id}{menu}
$data_info{$id}{description}
};
%all_pg_hba_conf = ();
&show_diff(%all_pg_hba_conf_diff);
%all_pg_hba_conf_diff = ();
print qq{
};
}
# Get relevant content of pg_ident.conf
# Collect the active (uncommented) mappings from pg_ident.conf into
# $all_pg_ident_conf{content}, then load any recorded changes from the
# companion .diff file.
sub pg_ident_conf
{
	my ($input_dir, $file) = @_;

	return if (($ACTION eq 'home') || ($ACTION eq 'database-info'));

	%all_pg_ident_conf = ();
	# Keep only mapping lines, stripping trailing comments
	my $fh = open_filehdl("$input_dir/$file");
	while (my $line = <$fh>) {
		chomp($line);
		next if ($line !~ /^[a-z]/);
		$line =~ s/\s*#.*//;
		$all_pg_ident_conf{content} .= "$line\n";
	}
	$fh->close();

	return if (!exists $all_pg_ident_conf{content});

	# Load change on configuration file from diff files
	&get_diff("$input_dir/$file.diff", \%all_pg_ident_conf_diff);
}
# Show content of pg_ident.conf
# Print the pg_ident.conf content section followed by the recorded
# configuration changes. Consumes and clears %all_pg_ident_conf and
# %all_pg_ident_conf_diff.
sub pg_ident_conf_report
{
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my $output = $all_pg_ident_conf{content} || '';
# Wrap the content, or emit an empty placeholder when nothing was collected
if (!$output) {
$output = '
';
} else {
$output = "
\n$output
\n";
}
my $id = &get_data_id('cluster-pgident', %data_info);
# NOTE(review): $output is built but not interpolated in the print below —
# looks like markup lost in extraction; verify against upstream.
print qq{
-
$data_info{$id}{menu}
$data_info{$id}{description}
};
%all_pg_ident_conf = ();
&show_diff(%all_pg_ident_conf_diff);
%all_pg_ident_conf_diff = ();
print qq{
};
}
# Get configuration from pg_settings
# Load the pg_settings snapshot into %all_settings
# (label -> setting name -> {value, unit, bootval, resetval[, pending_restart]}),
# track data_checksums in %OVERALL_STATS, then load recorded changes from
# the companion .diff file.
sub pg_settings
{
my ($input_dir, $file) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
%all_settings = ();
# Load data from file
my $curfh = open_filehdl("$input_dir/$file");
while (my $l = <$curfh>) {
chomp($l);
next if (!$l);
my @data = split(/;/, $l);
next if (!&normalize_line(\@data));
# timestamp | label | setting | value | unit | context | source | boot_val | reset_val | pending_restart
$all_settings{$data[1]}{$data[2]}{value} = $data[3];
# Older data files only carry the first four columns; default the rest
$all_settings{$data[1]}{$data[2]}{unit} = '';
$all_settings{$data[1]}{$data[2]}{bootval} = '';
$all_settings{$data[1]}{$data[2]}{resetval} = '';
if ($#data >= 6) {
$all_settings{$data[1]}{$data[2]}{unit} = $data[4];
$all_settings{$data[1]}{$data[2]}{bootval} = $data[7];
$all_settings{$data[1]}{$data[2]}{resetval} = $data[8];
# pending_restart only exists in the newest format
if ($#data >= 9) {
$all_settings{$data[1]}{$data[2]}{pending_restart} = $data[9];
}
if ($data[2] eq 'data_checksums') {
$OVERALL_STATS{'cluster'}{'data_checksums'} = $data[3];
}
}
}
$curfh->close();
# Load change on configuration file from diff files
$file =~ s/\.csv/.diff/;
&get_diff("$input_dir/$file", \%all_settings_diff);
}
# Show configuration from pg_settings
# Print the pg_settings report section: a table per settings label with
# current/unit/reset/boot values and the pending_restart flag, followed by
# the recorded configuration changes. Consumes and clears %all_settings
# and %all_settings_diff.
# NOTE(review): the $output buffer built below is overwritten at the end of
# the else-branch and never interpolated into the final print — this looks
# like markup lost during extraction; verify against the upstream file.
sub pg_settings_report
{
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my $id = &get_data_id('cluster-settings', %data_info);
my $output = '';
if (scalar keys %all_settings == 0) {
$output = '
';
} else {
foreach my $lbl (sort keys %all_settings) {
$output .= "
$lbl |
---|
\n";
$output .= "
Name | Current | Unit | Reset val | Boot val | Pending restart |
---|
\n";
# Settings sorted case-insensitively within each label
foreach my $set (sort { lc($a) cmp lc($b) } keys %{$all_settings{$lbl}}) {
$output .= "
$set | $all_settings{$lbl}{$set}{value} | $all_settings{$lbl}{$set}{unit} | ";
if ($all_settings{$lbl}{$set}{resetval}) {
$output .= "$all_settings{$lbl}{$set}{resetval} | ";
} else {
$output .= " | ";
}
if ($all_settings{$lbl}{$set}{bootval}) {
$output .= "$all_settings{$lbl}{$set}{bootval} | ";
} else {
$output .= " | ";
}
# pending_restart is only present in newer data formats
if (exists $all_settings{$lbl}{$set}{pending_restart}) {
if ($all_settings{$lbl}{$set}{pending_restart} eq 't') {
$output .= "$all_settings{$lbl}{$set}{pending_restart} | ";
} else {
$output .= "$all_settings{$lbl}{$set}{pending_restart} | ";
}
} else {
$output .= "n/a | ";
}
$output .= "
\n";
}
}
$output = "
\n";
%all_settings = ();
}
%all_settings = ();
print qq{
-
$data_info{$id}{menu}
$data_info{$id}{description}
};
&show_diff(%all_settings_diff);
%all_settings_diff = ();
print qq{
};
}
# Get non default configuration from pg_settings
# Load the non-default pg_settings snapshot into %all_nondefault_settings
# (label -> setting name -> {value, unit, bootval, resetval}). Older data
# files only carry the first four columns; the extra fields default to ''.
sub pg_nondefault_settings
{
	my ($input_dir, $file) = @_;

	%all_nondefault_settings = ();
	my $fh = open_filehdl("$input_dir/$file");
	while (my $line = <$fh>) {
		chomp($line);
		next if (!$line);
		my @data = split(/;/, $line);
		next if (!&normalize_line(\@data));
		# timestamp | label | setting | value
		my $entry = {
			value    => $data[3],
			unit     => '',
			bootval  => '',
			resetval => '',
		};
		# Extended format carries unit/boot/reset values as well
		if ($#data >= 6) {
			$entry->{unit}     = $data[4];
			$entry->{bootval}  = $data[7];
			$entry->{resetval} = $data[8];
		}
		$all_nondefault_settings{$data[1]}{$data[2]} = $entry;
	}
	$fh->close();
}
# Show non default configuration from pg_settings
# Print the non-default settings report: a table per label with
# current/unit/reset/boot values. Consumes and clears
# %all_nondefault_settings. Unlike other report subs this one prints
# nothing at all when no data was collected.
sub pg_nondefault_settings_report
{
my ($src_base, %data_info) = @_;
return if (scalar keys %all_nondefault_settings == 0);
my $id = &get_data_id('cluster-nondefault-settings', %data_info);
print qq{
-
$data_info{$id}{menu}
$data_info{$id}{description}
};
foreach my $lbl (sort keys %all_nondefault_settings) {
print "$lbl |
\n";
print "Name | Current | Unit | Reset val | Boot val |
\n";
# Settings sorted case-insensitively within each label
foreach my $set (sort { lc($a) cmp lc($b) } keys %{$all_nondefault_settings{$lbl}}) {
print "$set | $all_nondefault_settings{$lbl}{$set}{value} | $all_nondefault_settings{$lbl}{$set}{unit} | ";
if ($all_nondefault_settings{$lbl}{$set}{resetval}) {
print "$all_nondefault_settings{$lbl}{$set}{resetval} | ";
} else {
print " | ";
}
if ($all_nondefault_settings{$lbl}{$set}{bootval}) {
print "$all_nondefault_settings{$lbl}{$set}{bootval} | ";
} else {
print " | ";
}
print "
\n";
}
}
print qq{
};
%all_nondefault_settings = ();
}
# Get configuration from pg_db_role_setting
# Load per-database/per-role settings (pg_db_role_setting) into
# %all_db_role_setting (database -> role -> settings string), then load
# recorded changes from the companion .diff file.
sub pg_db_role_setting
{
	my ($input_dir, $file) = @_;

	return if (($ACTION eq 'home') || ($ACTION eq 'database-info'));

	%all_db_role_setting = ();
	my $fh = open_filehdl("$input_dir/$file");
	while (my $line = <$fh>) {
		chomp($line);
		next if (!$line);
		my @data = split(/;/, $line);
		next if (!&normalize_line(\@data));
		# timestamp | database | role | settings
		# An empty database or role means the setting applies to all of them
		$data[1] ||= 'All';
		$data[2] ||= 'All';
		$all_db_role_setting{$data[1]}{$data[2]} = $data[3];
	}
	$fh->close();

	# Load change on configuration file from diff files
	$file =~ s/\.csv/.diff/;
	&get_diff("$input_dir/$file", \%all_db_role_setting_diff);
}
# Show configuration from pg_db_role_setting
# Print the database/role settings table followed by the recorded changes.
# Consumes and clears %all_db_role_setting and %all_db_role_setting_diff.
# NOTE(review): like the other *_conf_report subs, the $output buffer is
# built but never interpolated into the final print — likely markup lost
# in extraction; verify against upstream.
sub pg_db_role_setting_report
{
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my $id = &get_data_id('cluster-dbrolesetting', %data_info);
my $output = '';
if (scalar keys %all_db_role_setting == 0) {
$output = '
';
} else {
$output = "
\n";
$output .= "Database | Role | Settings |
\n";
foreach my $db (sort keys %all_db_role_setting) {
next if (($db ne 'all') && ($#INCLUDE_DB >= 0) && (!grep($db =~ /^$_$/, @INCLUDE_DB)));
foreach my $set (sort { lc($a) cmp lc($b) } keys %{$all_db_role_setting{$db}}) {
$output .= "$db | $set | $all_db_role_setting{$db}{$set} |
\n";
}
}
$output .= "
\n";
%all_db_role_setting = ();
}
print qq{
-
$data_info{$id}{menu}
$data_info{$id}{description}
};
&show_diff(%all_db_role_setting_diff);
%all_db_role_setting_diff = ();
print qq{
};
%all_db_role_setting = ();
}
# Compute statistics of buffercache database
# Incrementally parse pg_buffercache per-database statistics starting at
# $offset into %all_database_buffercache, maintain the global database
# list (@global_databases, including the synthetic 'all' entry), and
# return the new file offset.
sub pg_database_buffercache
{
my ($input_dir, $file, $offset) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my %db_list = ();
# Load data from file
my $curfh = open_filehdl("$input_dir/$file");
$curfh->seek($offset,0);
while (my $l = <$curfh>) {
$offset += length($l);
my @data = split(/;/, $l);
next if (!&normalize_line(\@data));
# Store list of database
$db_list{$data[1]} = 1;
# date_trunc | datname | buffers | buffered | buffers % | database %
$all_database_buffercache{$data[0]}{$data[1]}{shared_buffers_used} = ($data[4]||0);
$all_database_buffercache{$data[0]}{$data[1]}{database_loaded} = ($data[5]||0);
# 'all' aggregates the per-database percentages across the cluster
$all_database_buffercache{$data[0]}{'all'}{shared_buffers_used} += ($data[4]||0);
$all_database_buffercache{$data[0]}{'all'}{database_loaded} += ($data[5]||0);
}
$curfh->close();
# Store the full list of database
foreach my $d (keys %db_list) {
push(@global_databases, $d) if (!grep/^$d$/, @global_databases);
}
# Add the synthetic 'all' entry once any real database is known
push(@global_databases, 'all') if (($#global_databases >= 0) && !grep(/^all$/, @global_databases));
return $offset;
}
# Compute report of buffercache database statistics
# Render the shared-buffers usage graphs: % of shared buffers used per
# database ('cluster-buffersused', includes the 'all' aggregate) and % of
# each database loaded in cache ('cluster-databaseloaded', excludes 'all').
# Consumes and clears %all_database_buffercache.
sub pg_database_buffercache_report
{
my ($src_base, $db_glob, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my %shared_stat = ();
my $tz = ($STATS_TIMEZONE*3600*1000);
foreach my $t (sort keys %all_database_buffercache) {
foreach my $db (@global_databases) {
next if (($db ne 'all') && ($#INCLUDE_DB >= 0) && (!grep($db =~ /^$_$/, @INCLUDE_DB)));
# Missing samples are graphed as 0 to keep the series continuous
$shared_stat{$db}{shared_buffers_used} .= '[' . ($t - $tz) . ',' . ($all_database_buffercache{$t}{$db}{shared_buffers_used}||0) . '],';
$shared_stat{$db}{database_loaded} .= '[' . ($t - $tz) . ',' . ($all_database_buffercache{$t}{$db}{database_loaded}||0) . '],';
}
}
%all_database_buffercache = ();
foreach my $id (sort {$a <=> $b} keys %data_info) {
next if ($id ne $ID_ACTION);
if ($data_info{$id}{name} eq 'cluster-buffersused') {
my @graph_data = ();
foreach my $db (sort keys %shared_stat) {
next if (($db ne 'all') && ($#INCLUDE_DB >= 0) && (!grep($db =~ /^$_$/, @INCLUDE_DB)));
push(@{$data_info{$id}{legends}}, "% used by $db");
$shared_stat{$db}{shared_buffers_used} =~ s/,$//;
push(@graph_data, $shared_stat{$db}{shared_buffers_used});
}
print &jqplot_linegraph_array($IDX++, 'cluster-buffersused', \%{$data_info{$id}}, '', @graph_data);
} elsif ($data_info{$id}{name} eq 'cluster-databaseloaded') {
my @graph_data = ();
foreach my $db (sort keys %shared_stat) {
# The 'all' aggregate is meaningless for per-database load ratio
next if ($db eq 'all');
next if (($db ne 'all') && ($#INCLUDE_DB >= 0) && (!grep($db =~ /^$_$/, @INCLUDE_DB)));
push(@{$data_info{$id}{legends}}, "% of $db");
$shared_stat{$db}{database_loaded} =~ s/,$//;
push(@graph_data, $shared_stat{$db}{database_loaded});
}
print &jqplot_linegraph_array($IDX++, 'cluster-databaseloaded', \%{$data_info{$id}}, '', @graph_data);
}
}
}
# Compute statistics of usagecount in shared buffers
# Incrementally parse pg_buffercache usagecount statistics starting at
# $offset, summing buffer percentages per timestamp and usagecount value
# into %all_database_usagecount. Returns the new file offset.
sub pg_database_usagecount
{
	my ($input_dir, $file, $offset) = @_;

	return if (($ACTION eq 'home') || ($ACTION eq 'database-info'));

	my $fh = open_filehdl("$input_dir/$file");
	$fh->seek($offset, 0);
	while (my $line = <$fh>) {
		$offset += length($line);
		my @data = split(/;/, $line);
		next if (!&normalize_line(\@data));
		# date_trunc | datname | usagecount | buffer | buffers %
		$all_database_usagecount{$data[0]}{$data[2]} += ($data[4]||0);
	}
	$fh->close();

	return $offset;
}
# Compute graph of usagecount in shared buffers
# Render the 'cluster-usagecount' graph: one series per usagecount value
# with the percentage of shared buffers at that usagecount over time.
# Consumes and clears %all_database_usagecount.
sub pg_database_usagecount_report
{
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my %shared_stat = ();
my $tz = ($STATS_TIMEZONE*3600*1000);
foreach my $t (sort keys %all_database_usagecount) {
foreach my $u (sort keys %{$all_database_usagecount{$t}}) {
$shared_stat{$u}{usagecount} .= '[' . ($t - $tz) . ',' . $all_database_usagecount{$t}{$u} . '],';
}
}
%all_database_usagecount = ();
foreach my $id (sort {$a <=> $b} keys %data_info) {
next if ($id ne $ID_ACTION);
if ($data_info{$id}{name} eq 'cluster-usagecount') {
my @graph_data = ();
foreach my $u (sort keys %shared_stat) {
push(@{$data_info{$id}{legends}}, "% of usagecount $u");
$shared_stat{$u}{usagecount} =~ s/,$//;
push(@graph_data, $shared_stat{$u}{usagecount});
}
print &jqplot_linegraph_array($IDX++, 'cluster-usagecount', \%{$data_info{$id}}, '', @graph_data);
}
}
}
# Compute statistics of dirty buffer in cache
# Incrementally parse pg_buffercache dirty-buffer statistics starting at
# $offset, summing buffer percentages of dirty buffers per timestamp and
# usagecount value into %all_database_isdirty. Returns the new offset.
sub pg_database_isdirty
{
	my ($input_dir, $file, $offset) = @_;

	return if (($ACTION eq 'home') || ($ACTION eq 'database-info'));

	my $fh = open_filehdl("$input_dir/$file");
	$fh->seek($offset, 0);
	while (my $line = <$fh>) {
		$offset += length($line);
		my @data = split(/;/, $line);
		next if (!&normalize_line(\@data));
		# date_trunc | datname | usagecount | isdirty | buffer | buffers %
		# Only dirty buffers (isdirty = 't') are accumulated
		$all_database_isdirty{$data[0]}{$data[2]} += $data[5] if ($data[3] eq 't');
	}
	$fh->close();

	return $offset;
}
# Compute graphs of dirty buffer in cache
# Render the 'cluster-isdirty' graph: one series per usagecount value with
# the percentage of dirty shared buffers at that usagecount over time.
# Consumes and clears %all_database_isdirty.
sub pg_database_isdirty_report
{
my ($src_base, $db, %data_info) = @_;
return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
my %shared_stat = ();
my $tz = ($STATS_TIMEZONE*3600*1000);
foreach my $t (sort keys %all_database_isdirty) {
foreach my $u (sort keys %{$all_database_isdirty{$t}}) {
$shared_stat{$u}{usagecount} .= '[' . ($t - $tz) . ',' . $all_database_isdirty{$t}{$u} . '],';
}
}
%all_database_isdirty = ();
foreach my $id (sort {$a <=> $b} keys %data_info) {
next if ($id ne $ID_ACTION);
if ($data_info{$id}{name} eq 'cluster-isdirty') {
my @graph_data = ();
foreach my $u (sort keys %shared_stat) {
push(@{$data_info{$id}{legends}}, "% of usagecount $u");
$shared_stat{$u}{usagecount} =~ s/,$//;
push(@graph_data, $shared_stat{$u}{usagecount});
}
print &jqplot_linegraph_array($IDX++, 'cluster-isdirty', \%{$data_info{$id}}, '', @graph_data);
}
}
}
# Compute statistics of archiver
# Incrementally parse pg_stat_archiver snapshots starting at $offset.
# For report actions, store per-sample deltas of archived/failed counts
# (clamped to 0 on counter reset) plus last archived/failed WAL info in
# %all_stat_archiver; for home/database-info, only track the most recent
# archiver state in %OVERALL_STATS. Returns the new file offset.
sub pg_stat_archiver
{
my ($input_dir, $file, $offset) = @_;
my @start_vals = ();
# Load data from file
my $curfh = open_filehdl("$input_dir/$file");
$curfh->seek($offset,0);
while (my $l = <$curfh>) {
$offset += length($l);
my @data = split(/;/, $l);
next if (!&normalize_line(\@data));
# timestamp | archived_count | last_archived_wal | last_archived_time | failed_count | last_failed_wal | last_failed_time | stats_reset
# First valid line seeds the previous-sample values for delta computation
push(@start_vals, @data) if ($#start_vals < 0);
# Strip fractional seconds from the timestamp columns
$data[3] =~ s/\..*//;
$data[6] =~ s/\..*//;
$data[7] =~ s/\..*//;
# Get archiver size statistics
if ( ($ACTION ne 'home') && ($ACTION ne 'database-info') ) {
my $tmp_val = '';
# Delta since previous sample, clamped to 0 on counter reset
(($data[1] - $start_vals[1]) < 0) ? $tmp_val = 0 : $tmp_val = ($data[1] - $start_vals[1]);
$all_stat_archiver{$data[0]}{archived_count} = $tmp_val;
(($data[4] - $start_vals[4]) < 0) ? $tmp_val = 0 : $tmp_val = ($data[4] - $start_vals[4]);
$all_stat_archiver{$data[0]}{failed_count} = $tmp_val;
$all_stat_archiver{$data[0]}{last_archived_wal} = $data[2];
$all_stat_archiver{$data[0]}{last_archived_time} = $data[3];
$all_stat_archiver{$data[0]}{last_failed_wal} = $data[5];
$all_stat_archiver{$data[0]}{last_failed_time} = $data[6];
$all_stat_archiver{$data[0]}{stats_reset} = $data[7];
} else {
# Overview mode: keep only the most recent values (string comparison
# on the timestamp columns)
if (!$OVERALL_STATS{'archiver'}{last_archived_time} || ($data[3] gt $OVERALL_STATS{'archiver'}{last_archived_time})) {
$OVERALL_STATS{'archiver'}{last_archived_wal} = $data[2];
$OVERALL_STATS{'archiver'}{last_archived_time} = $data[3];
}
if (!$OVERALL_STATS{'archiver'}{last_failed_time} || ($data[6] gt $OVERALL_STATS{'archiver'}{last_failed_time})) {
$OVERALL_STATS{'archiver'}{last_failed_wal} = $data[5];
$OVERALL_STATS{'archiver'}{last_failed_time} = $data[6];
}
if (!$OVERALL_STATS{'archiver'}{stats_reset} || ($data[7] gt $OVERALL_STATS{'archiver'}{stats_reset})) {
$OVERALL_STATS{'archiver'}{stats_reset} = $data[7];
}
}
# Current sample becomes the baseline for the next delta
@start_vals = ();
push(@start_vals, @data);
}
$curfh->close();
return $offset;
}
# Compute graphs of archiver statistics
# Draw the archiver activity graph (archived vs failed WAL counts)
# from the samples accumulated in %all_stat_archiver, then reset it.
sub pg_stat_archiver_report
{
	my ($src_base, $db, %data_info) = @_;

	# Nothing to draw on the home and database-info pages
	return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );

	# Build the jqplot data series; timestamps are shifted by the
	# configured timezone expressed in milliseconds.
	my $tz_offset = $STATS_TIMEZONE * 3600 * 1000;
	my %archiver_stat = ();
	foreach my $ts (sort { $a <=> $b } keys %all_stat_archiver) {
		my $x = $ts - $tz_offset;
		$archiver_stat{archived_count} .= '[' . $x . ',' . $all_stat_archiver{$ts}{archived_count} . '],';
		$archiver_stat{failed_count} .= '[' . $x . ',' . $all_stat_archiver{$ts}{failed_count} . '],';
	}
	%all_stat_archiver = ();

	# Only emit the graph matching the currently requested action id
	foreach my $gid (sort { $a <=> $b } keys %data_info) {
		next if ($gid ne $ID_ACTION);
		if ($data_info{$gid}{name} eq 'cluster-archive') {
			print &jqplot_linegraph_array($IDX++, 'cluster-archive', \%{$data_info{$gid}}, '', $archiver_stat{archived_count}, $archiver_stat{failed_count});
		}
	}
}
# Compute graphs of statements statistics
# Parse a pg_stat_statements data file into %all_stat_statements,
# keyed by database name then by query text. The number of fields on
# each line tells which PostgreSQL version produced it.
sub pg_stat_statements
{
	my ($input_dir, $file) = @_;

	return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );

	# Kept for compatibility with the historical layout of this sub;
	# none of these three are used below.
	my $total_val = 0;
	my @start_vals = ();
	my $has_temp = 0;

	%all_stat_statements = ();

	# Load data from file
	my $curfh = open_filehdl("$input_dir/$file");
	while (my $l = <$curfh>) {
		chomp($l);
		next if (!$l);
		my @data = split(/;/, $l);
		next if (!&normalize_line(\@data));
		next if (($#INCLUDE_DB >= 0) && (!grep($data[2] =~ /^$_$/, @INCLUDE_DB)));

		# pg 8.4
		# timestamp | userid | datname | query | calls | total_time | rows
		# pg 9.0-9.1 adds:
		# ... | shared_blks hit/read/written | local_blks hit/read/written | temp_blks read/written
		# pg 9.2+ adds:
		# ... | shared/local blks_dirtied | blk_read_time | blk_write_time
		my $qpos = 3;	# column index of the query text
		next if (!$data[$qpos]);

		# Alias the per-query record (autovivifies, like the original
		# direct assignments did).
		my $stmt = \%{ $all_stat_statements{$data[2]}{$data[$qpos]} };
		$stmt->{calls}      = ($data[$qpos+1] || 0);
		$stmt->{total_time} = ($data[$qpos+2] || 0);
		$stmt->{rows}       = ($data[$qpos+3] || 0);
		if ($#data > 6) {
			$stmt->{shared_blks_hit}  = $data[$qpos+4];
			$stmt->{shared_blks_read} = $data[$qpos+5];
			if ($#data < 18) {
				# pg 9.0/9.1 layout
				$stmt->{shared_blks_written} = $data[$qpos+6];
				$stmt->{local_blks_hit}      = $data[$qpos+7];
				$stmt->{local_blks_read}     = $data[$qpos+8];
				$stmt->{local_blks_written}  = $data[$qpos+9];
				$stmt->{temp_blks_read}      = $data[$qpos+10];
				$stmt->{temp_blks_written}   = $data[$qpos+11];
				# This is just a flag: the 'has_temp' pseudo query is never
				# displayed, its total_time key only exists to avoid
				# use-of-uninitialised-value warnings later.
				$all_stat_statements{$data[2]}{has_temp}{total_time} = 1;
			} else {
				# pg 9.2+ layout.
				# NOTE(review): dirtied and temp counters are scaled by the
				# 8192-byte block size while read/hit/written stay raw block
				# counts - looks inconsistent, confirm the intended units.
				$stmt->{shared_blks_dirtied} = ($data[$qpos+6]*8192);
				$stmt->{shared_blks_written} = $data[$qpos+7];
				$stmt->{local_blks_hit}      = $data[$qpos+8];
				$stmt->{local_blks_read}     = $data[$qpos+9];
				$stmt->{local_blks_dirtied}  = $data[$qpos+10];
				$stmt->{local_blks_written}  = $data[$qpos+11];
				$stmt->{temp_blks_read}      = ($data[$qpos+12]*8192);
				$stmt->{temp_blks_written}   = ($data[$qpos+13]*8192);
				$stmt->{blk_read_time}       = $data[$qpos+14];
				$stmt->{blk_write_time}      = $data[$qpos+15];
				# Flag: 2 = block counters available, 3 = I/O timings too
				$all_stat_statements{$data[2]}{has_temp}{total_time} = 2;
				if ($data[$qpos+14] || $data[$qpos+15]) {
					$all_stat_statements{$data[2]}{has_temp}{total_time} = 3;
				}
			}
		}
	}
	$curfh->close();
}
# Compute graphs of statements statistics
# Report per-query statistics for a database: calls, avg/total time,
# rows, plus optional temp-block / block-counter / I/O-time columns
# depending on the PostgreSQL version the data came from.
sub pg_stat_statements_report
{
	my ($src_base, $db, %data_info) = @_;

	return if ( ($ACTION eq 'home') || ($ACTION eq 'database-info') );
	return if (!$db);

	my $id = &get_data_id('database-queries', %data_info);

	# Level of details collected for this database:
	#   0 = pg 8.4 (calls/time/rows only), 1 = pg 9.0-9.1 (temp blocks),
	#   2 = pg 9.2+ (block counters), 3 = pg 9.2+ with I/O timings.
	# FIX: reading the flag once through exists() guards avoids the old
	# code autovivifying {has_temp} for pg 8.4 data, which made the rows
	# grow a temp column the header never declared.
	my $detail_level = 0;
	if (exists $all_stat_statements{$db} && exists $all_stat_statements{$db}{has_temp}) {
		$detail_level = $all_stat_statements{$db}{has_temp}{total_time};
	}

	my $header = '';
	if (exists $all_stat_statements{$db}) {
		if ($detail_level >= 1) {
			$header .= qq{
Temp blocks written | };
		}
		if ($detail_level >= 2) {
			$header .= qq{
Blocks read | Blocks hit | Blocks dirtied | Blocks written | };
		}
		if ($detail_level == 3) {
			$header .= qq{
I/O time | };
		}
		$header = qq{
Calls | Avg time | Total time | Rows | $header
Query | };
	} else {
		$header = qq{
| };
	}
	print qq{
-
$data_info{$id}{menu} on $db database
$data_info{$id}{description}
$header
};
	# Queries sorted by cumulated execution time, most expensive first
	foreach my $q (sort {$all_stat_statements{$db}{$b}{total_time} <=> $all_stat_statements{$db}{$a}{total_time}} keys %{$all_stat_statements{$db}}) {
		next if ($q eq 'has_temp');
		my $additional_cols = '';
		if ($detail_level >= 1) {
			$additional_cols = "" . &pretty_print_number($all_stat_statements{$db}{$q}{temp_blks_written}) . " | ";
		}
		# FIX: was "== 2"; at level 3 the header declared the four block
		# counter columns but the rows skipped them, shifting every row
		# by four cells. ">= 2" keeps rows aligned with the header.
		if ($detail_level >= 2) {
			$additional_cols .= "" . &pretty_print_number($all_stat_statements{$db}{$q}{shared_blks_read}) . " | " . &pretty_print_number($all_stat_statements{$db}{$q}{shared_blks_hit}) . " | " . &pretty_print_number($all_stat_statements{$db}{$q}{shared_blks_dirtied}) . " | " . &pretty_print_number($all_stat_statements{$db}{$q}{shared_blks_written}) . " | ";
		}
		if ($detail_level == 3) {
			$additional_cols .= "" . sprintf("%0.2d", ($all_stat_statements{$db}{$q}{blk_read_time}+$all_stat_statements{$db}{$q}{blk_write_time})/($all_stat_statements{$db}{$q}{calls}||1)) . " | ";
		}
		my $query = $q;
		# Semicolons were escaped as #SMCLN# in the CSV-like data file
		$query =~ s/#SMCLN#/;/g;
		print "$all_stat_statements{$db}{$q}{calls} | " . &format_duration(int($all_stat_statements{$db}{$q}{total_time}/($all_stat_statements{$db}{$q}{calls}||1))) . " | " . &format_duration($all_stat_statements{$db}{$q}{total_time}) . " | $all_stat_statements{$db}{$q}{rows} | $additional_cols$query |
\n";
	}
	print qq{
};
	%all_stat_statements = ();
}
# Validate and canonicalize one semicolon-splitted data line, in place.
# $pos is the index of the database-name field (defaults to 2).
# Returns 1 when the line must be kept, 0 when it must be skipped.
sub normalize_line
{
	my ($data, $pos) = @_;
	$pos ||= 2;

	# First field must be an epoch timestamp
	return 0 if ($data->[0] !~ /^\d+/);
	$data->[0] = &convert_time($data->[0]);

	# Honor the user supplied time frame, when any
	return 0 if ($BEGIN && ($data->[0] < $BEGIN));
	return 0 if ($END && ($data->[0] > $END));

	chomp($data->[-1]);

	# Normalize decimal separators and default empty fields to zero
	foreach my $field (@$data) {
		$field =~ tr/,/./;
		$field = '0' if ($field eq '');
	}

	# Always skip default template database
	return 0 if ($data->[$pos] =~ /template/);

	return 1;
}
# Format a byte count into a human readable string using binary units
# (B, KB, MB, GB, TB, PB) with two decimal places. Returns 0 for an
# empty/zero value and passes the '-' placeholder through unchanged.
sub pretty_print_size
{
	my $size = shift;

	return 0 if (!$size);
	return '-' if ($size eq '-');

	# Units from largest to smallest, each threshold a power of 1024
	my @units = (
		[ 1125899906842624, 'PB' ],
		[ 1099511627776,    'TB' ],
		[ 1073741824,       'GB' ],
		[ 1048576,          'MB' ],
		[ 1024,             'KB' ],
	);
	foreach my $unit (@units) {
		my ($threshold, $suffix) = @$unit;
		if ($size >= $threshold) {
			return sprintf("%0.2f", $size / $threshold) . " $suffix";
		}
	}

	# Below one kilobyte the value is reported in plain bytes
	return "$size B";
}
# Format a count into a human readable string using metric suffixes
# (K, M, G, T, P) with two decimal places. Returns 0 for an empty/zero
# value and passes the '-' placeholder through unchanged.
sub pretty_print_number
{
	my $num = shift;

	return 0 if (!$num);
	return '-' if ($num eq '-');

	# Thresholds from largest to smallest, each a power of 1000
	my @units = (
		[ 1000000000000000, 'P' ],
		[ 1000000000000,    'T' ],
		[ 1000000000,       'G' ],
		[ 1000000,          'M' ],
		[ 1000,             'K' ],
	);
	foreach my $unit (@units) {
		my ($threshold, $suffix) = @$unit;
		if ($num >= $threshold) {
			return sprintf("%0.2f", $num / $threshold) . " $suffix";
		}
	}

	# Below one thousand the value is returned unchanged, no suffix
	return $num;
}
# Render the home page dashboard: aggregate cluster-wide and per-database
# statistics, then system (sar) statistics, then print the report.
# NOTE(review): the "print <" / "EOF" pairs below look like heredocs whose
# markers and bodies were lost (presumably during an HTML-stripping
# extraction); the surrounding logic is documented but left untouched.
sub show_home
{
my $input_dir = shift();
# Compute global statistics for home page dashboard
my %overall_stat_databases = ();
if (exists $OVERALL_STATS{'cluster'}) {
$OVERALL_STATS{'cluster'}{'blks_hit'} ||= 0;
$OVERALL_STATS{'cluster'}{'blks_read'} ||= 0;
# Cache hit ratio as a percentage; "|| 1" avoids division by zero
$OVERALL_STATS{'cluster'}{'cache_ratio'} = sprintf("%3d", ($OVERALL_STATS{'cluster'}{'blks_hit'} * 100) / (($OVERALL_STATS{'cluster'}{'blks_read'} + $OVERALL_STATS{'cluster'}{'blks_hit'}) || 1)) . "%";
$OVERALL_STATS{'cluster'}{'temp_bytes'} = &pretty_print_size($OVERALL_STATS{'cluster'}{'temp_bytes'});
# Per-database cache hit ratio
foreach my $db (keys %{$OVERALL_STATS{'database'}}) {
next if ($db eq 'all');
$OVERALL_STATS{'database'}{$db}{'blks_hit'} ||= 0;
$OVERALL_STATS{'database'}{$db}{'blks_read'} ||= 0;
$OVERALL_STATS{'database'}{$db}{'blks_hit'} ||= 0;
$OVERALL_STATS{'database'}{$db}{'cache_ratio'} = sprintf("%3d", ($OVERALL_STATS{'database'}{$db}{'blks_hit'} * 100) / (($OVERALL_STATS{'database'}{$db}{'blks_read'} + $OVERALL_STATS{'database'}{$db}{'blks_hit'}) || 1));
}
# Sum the cluster size and, for each metric, remember which database
# holds the extreme value (maximum everywhere, minimum for cache_ratio)
foreach my $db (keys %{$OVERALL_STATS{'database'}}) {
next if ($db eq 'all');
$OVERALL_STATS{'cluster'}{'size'} += ($OVERALL_STATS{'database'}{$db}{'size'} || 0);
next if (($#INCLUDE_DB >= 0) && !grep($db =~ /^$_$/, @INCLUDE_DB));
if (exists $OVERALL_STATS{'database'}{$db}{'size'}) {
if (!exists $overall_stat_databases{'size'} || $OVERALL_STATS{'database'}{$db}{'size'} > $overall_stat_databases{'size'}[1]) {
@{$overall_stat_databases{'size'}} = ($db, $OVERALL_STATS{'database'}{$db}{'size'});
}
}
if (exists $OVERALL_STATS{'database'}{$db}{'nbackend'}) {
if (!exists $overall_stat_databases{'nbackend'} || $OVERALL_STATS{'database'}{$db}{'nbackend'} > $overall_stat_databases{'nbackend'}[1]) {
@{$overall_stat_databases{'nbackend'}} = ($db, $OVERALL_STATS{'database'}{$db}{'nbackend'});
}
}
if (exists $OVERALL_STATS{'database'}{$db}{'returned'}) {
if (!exists $overall_stat_databases{'returned'} || $OVERALL_STATS{'database'}{$db}{'returned'} > $overall_stat_databases{'returned'}[1]) {
@{$overall_stat_databases{'returned'}} = ($db, $OVERALL_STATS{'database'}{$db}{'returned'});
}
}
if (exists $OVERALL_STATS{'database'}{$db}{'temp_files'}) {
if (!exists $overall_stat_databases{'temp_files'} || $OVERALL_STATS{'database'}{$db} {'temp_files'} > $overall_stat_databases{'temp_files'}[1]) {
@{$overall_stat_databases{'temp_files'}} = ($db, $OVERALL_STATS{'database'}{$db}{'temp_files'});
}
}
if (exists $OVERALL_STATS{'database'}{$db}{'temp_bytes'}) {
if (!exists $overall_stat_databases{'temp_bytes'} || $OVERALL_STATS{'database'}{$db}{'temp_bytes'} > $overall_stat_databases{'temp_bytes'}[1]) {
@{$overall_stat_databases{'temp_bytes'}} = ($db, $OVERALL_STATS{'database'}{$db}{'temp_bytes'});
}
}
if (exists $OVERALL_STATS{'database'}{$db}{'deadlocks'}) {
if (!exists $overall_stat_databases{'deadlocks'} || $OVERALL_STATS{'database'}{$db}{'deadlocks'} > $overall_stat_databases{'deadlocks'}[1]) {
@{$overall_stat_databases{'deadlocks'}} = ($db, $OVERALL_STATS{'database'}{$db}{'deadlocks'});
}
}
# cache_ratio is a minimum: we report the worst-cached database
if (exists $OVERALL_STATS{'database'}{$db}{'cache_ratio'}) {
if (!exists $overall_stat_databases{'cache_ratio'} || $OVERALL_STATS{'database'}{$db}{'cache_ratio'} < $overall_stat_databases{'cache_ratio'}[1]) {
@{$overall_stat_databases{'cache_ratio'}} = ($db, $OVERALL_STATS{'database'}{$db}{'cache_ratio'});
}
}
}
# Fallback entry, then convert byte counters to human readable form
@{$overall_stat_databases{'size'}} = ('unknown', 0) if (!exists $overall_stat_databases{'size'});
if (exists $overall_stat_databases{'size'}) {
$overall_stat_databases{'size'}[1] = &pretty_print_size($overall_stat_databases{'size'}[1]);
}
if (exists $overall_stat_databases{'temp_bytes'}) {
$overall_stat_databases{'temp_bytes'}[1] = &pretty_print_size($overall_stat_databases{'temp_bytes'}[1]);
}
}
# System (sar) statistics: default each metric to 'unknown' and
# convert epoch-millisecond timestamps to readable local dates
my %overall_system_stats = ();
if (!$DISABLE_SAR) {
if (!exists $OVERALL_STATS{'system'}{'cpu'}) {
@{$OVERALL_STATS{'system'}{'cpu'}} = ('unknown', 0);
} else {
${$OVERALL_STATS{'system'}{'cpu'}}[0] = localtime(${$OVERALL_STATS{'system'}{'cpu'}}[0]/1000);
}
if (!exists $OVERALL_STATS{'system'}{'load'}) {
@{$OVERALL_STATS{'system'}{'load'}} = ('unknown', 0);
} else {
${$OVERALL_STATS{'system'}{'load'}}[0] = localtime(${$OVERALL_STATS{'system'}{'load'}}[0]/1000);
}
if (!exists $OVERALL_STATS{'system'}{'blocked'}) {
@{$OVERALL_STATS{'system'}{'blocked'}} = ('unknown', 0);
} else {
${$OVERALL_STATS{'system'}{'blocked'}}[0] = localtime(${$OVERALL_STATS{'system'}{'blocked'}}[0]/1000);
}
if (!exists $OVERALL_STATS{'system'}{'kbcached'}) {
@{$OVERALL_STATS{'system'}{'kbcached'}} = ('unknown', 0);
} else {
${$OVERALL_STATS{'system'}{'kbcached'}}[0] = localtime(${$OVERALL_STATS{'system'}{'kbcached'}}[0]/1000);
}
if (!exists $OVERALL_STATS{'system'}{'kbdirty'}) {
@{$OVERALL_STATS{'system'}{'kbdirty'}} = ('unknown', 0);
} else {
${$OVERALL_STATS{'system'}{'kbdirty'}}[0] = localtime(${$OVERALL_STATS{'system'}{'kbdirty'}}[0]/1000);
}
if (!exists $OVERALL_STATS{'system'}{'bread'}) {
@{$OVERALL_STATS{'system'}{'bread'}} = ('unknown', 0);
} else {
${$OVERALL_STATS{'system'}{'bread'}}[0] = localtime(${$OVERALL_STATS{'system'}{'bread'}}[0]/1000);
}
if (!exists $OVERALL_STATS{'system'}{'bwrite'}) {
@{$OVERALL_STATS{'system'}{'bwrite'}} = ('unknown', 0);
} else {
${$OVERALL_STATS{'system'}{'bwrite'}}[0] = localtime(${$OVERALL_STATS{'system'}{'bwrite'}}[0]/1000);
}
if (!exists $OVERALL_STATS{'system'}{'svctm'}) {
@{$OVERALL_STATS{'system'}{'svctm'}} = ('unknown', 0, 'unknown');
} else {
${$OVERALL_STATS{'system'}{'svctm'}}[0] = localtime(${$OVERALL_STATS{'system'}{'svctm'}}[0]/1000);
}
# Per-device I/O: keep the busiest device for each metric
if (exists $OVERALL_STATS{'system'}{'devices'}) {
foreach my $d (sort keys %{$OVERALL_STATS{'system'}{'devices'}}) {
if (! exists $overall_system_stats{read} || ($overall_system_stats{read}[1] < $OVERALL_STATS{'system'}{'devices'}{$d}{read})) {
@{$overall_system_stats{read}} = ($d, $OVERALL_STATS{'system'}{'devices'}{$d}{read});
}
if (! exists $overall_system_stats{write} || ($overall_system_stats{write}[1] < $OVERALL_STATS{'system'}{'devices'}{$d}{write})) {
@{$overall_system_stats{write}} = ($d, $OVERALL_STATS{'system'}{'devices'}{$d}{write});
}
if (! exists $overall_system_stats{tps} || ($overall_system_stats{tps}[1] < $OVERALL_STATS{'system'}{'devices'}{$d}{tps})) {
@{$overall_system_stats{tps}} = ($d, $OVERALL_STATS{'system'}{'devices'}{$d}{tps});
}
}
}
# Filesystem space: keep the fullest filesystem (space and inodes)
if (exists $OVERALL_STATS{'system'}{'space'}) {
foreach my $d (sort keys %{$OVERALL_STATS{'system'}{'space'}}) {
if (! exists $overall_system_stats{'fsused'} || ($overall_system_stats{'fsused'}[1] < $OVERALL_STATS{'system'}{'space'}{$d}{'fsused'})) {
@{$overall_system_stats{'fsused'}} = ($d, $OVERALL_STATS{'system'}{'space'}{$d}{'fsused'});
}
if (! exists $overall_system_stats{'iused'} || ($overall_system_stats{'iused'}[1] < $OVERALL_STATS{'system'}{'space'}{$d}{'iused'})) {
@{$overall_system_stats{'iused'}} = ($d, $OVERALL_STATS{'system'}{'space'}{$d}{'iused'});
}
}
}
# Defaults for metrics with no collected samples
if (!exists $overall_system_stats{read}) {
@{$overall_system_stats{read}} = ('unknown', 0);
}
if (!exists $overall_system_stats{write}) {
@{$overall_system_stats{write}} = ('unknown', 0);
}
if (!exists $overall_system_stats{tps}) {
@{$overall_system_stats{tps}} = ('unknown', 0);
}
if (!exists $overall_system_stats{fsused}) {
@{$overall_system_stats{fsused}} = ('unknown', 0);
}
if (!exists $overall_system_stats{iused}) {
@{$overall_system_stats{iused}} = ('unknown', 0);
}
$overall_system_stats{read}[1] = &pretty_print_size($overall_system_stats{read}[1]);
$overall_system_stats{write}[1] = &pretty_print_size($overall_system_stats{write}[1]);
@{$overall_system_stats{kbcached}} = ($OVERALL_STATS{'system'}{'kbcached'}[0], &pretty_print_size($OVERALL_STATS{'system'}{'kbcached'}[1]));
@{$overall_system_stats{kbdirty}} = ($OVERALL_STATS{'system'}{'kbdirty'}[0], &pretty_print_size($OVERALL_STATS{'system'}{'kbdirty'}[1]));
@{$overall_system_stats{bread}} = ($OVERALL_STATS{'system'}{'bread'}[0], &pretty_print_size($OVERALL_STATS{'system'}{'bread'}[1]));
@{$overall_system_stats{bwrite}} = ($OVERALL_STATS{'system'}{'bwrite'}[0], &pretty_print_size($OVERALL_STATS{'system'}{'bwrite'}[1]));
}
# Dashboard column count depends on which data sources are available
my $numcol = 4;
if ($DISABLE_SAR) {
$numcol = 6;
} elsif (!exists $OVERALL_STATS{'system'}) {
$numcol = 12;
}
print <
-
EOF
# Shift epoch dates by the configured stats timezone (seconds)
my $tz = ((0-$STATS_TIMEZONE)*3600);
if (exists $OVERALL_STATS{'start_date'}) {
$OVERALL_STATS{'start_date'} = ($OVERALL_STATS{'start_date'}/1000) + $tz;
$OVERALL_STATS{'end_date'} = ($OVERALL_STATS{'end_date'}/1000) + $tz;
}
my $start_date = localtime($OVERALL_STATS{'start_date'}||0) || 'Unknown start date';
my $end_date = localtime($OVERALL_STATS{'end_date'}||0) || 'Unknown end date';
if (exists $OVERALL_STATS{'sar_start_date'}) {
$OVERALL_STATS{'sar_start_date'} = ($OVERALL_STATS{'sar_start_date'}/1000);
$OVERALL_STATS{'sar_end_date'} = ($OVERALL_STATS{'sar_end_date'}/1000);
}
my $sar_start_date = localtime($OVERALL_STATS{'sar_start_date'}||0) || 'Unknown start date';
my $sar_end_date = localtime($OVERALL_STATS{'sar_end_date'}||0) || 'Unknown end date';
# Build the cluster information block (sizes, temp files, deadlocks,
# extensions, partitioned tables, unlogged tables, invalid indexes...)
my $parts_info = '';
if (exists $OVERALL_STATS{'cluster'}) {
$OVERALL_STATS{'cluster'}{'size'} ||= 0;
$OVERALL_STATS{'cluster'}{'nbackend'} ||= 0;
$OVERALL_STATS{'cluster'}{'returned'} ||= 0;
$OVERALL_STATS{'cluster'}{'cache_ratio'} ||= 0;
$OVERALL_STATS{'cluster'}{'temp_files'} ||= 0;
$OVERALL_STATS{'cluster'}{'temp_bytes'} ||= 0;
$OVERALL_STATS{'cluster'}{'deadlocks'} ||= 0;
my $temp_file_info = '';
if ($OVERALL_STATS{'cluster'}{'temp_files'}) {
$temp_file_info = qq{
- $OVERALL_STATS{'cluster'}{'temp_files'} Temporary files
- $OVERALL_STATS{'cluster'}{'temp_bytes'} Temporary files size
};
}
my $deadlock_info = '';
if ($OVERALL_STATS{'cluster'}{'deadlocks'}) {
$deadlock_info = qq{
- $OVERALL_STATS{'cluster'}{'deadlocks'} Deadlocks
};
}
my $extnum = $#{$OVERALL_STATS{'cluster'}{'extensions'}} + 1;
my $extensions_info = '';
if ($extnum) {
my $extlist = join(', ', @{$OVERALL_STATS{'cluster'}{'extensions'}});
$extensions_info = qq{- $extnum Extensions ($extlist)
};
}
# Group partition names under their parent table ($1 is captured by
# the substitution on the schema-qualified name below)
my $nparts = $#{$OVERALL_STATS{'cluster'}{'partitionned_tables'}} + 1;
if ($nparts) {
my %partitions = ();
foreach my $pt (sort @{$OVERALL_STATS{'cluster'}{'partitionned_tables'}}) {
$pt =~ s/^([^\.]+)\.//;
$partitions{$1} .= "$pt;";
}
$parts_info = qq{- $nparts Partitioned tables
};
foreach my $t (keys %partitions) {
$parts_info .= qq{- $t $partitions{$t}
};
}
}
$sysinfo{PGVERSION}{'full_version'} ||= '';
$sysinfo{PGVERSION}{'uptime'} ||= '';
my $database_number = scalar keys %{$OVERALL_STATS{'database'}};
$OVERALL_STATS{'cluster'}{'size'} ||= '-';
$OVERALL_STATS{'cluster'}{'nbackend'} ||= '-';
$OVERALL_STATS{'cluster'}{'returned'} ||= '-';
$OVERALL_STATS{'cluster'}{'cache_ratio'} ||= '-';
my $bgwriter_reset = '';
if (exists $OVERALL_STATS{'bgwriter'}{stats_reset}) {
$bgwriter_reset = "- $OVERALL_STATS{'bgwriter'}{stats_reset} Last bgwriter stats reset
";
}
my $cluster_size = &pretty_print_size($OVERALL_STATS{'cluster'}{'size'});
$OVERALL_STATS{'cluster'}{unlogged} ||= 0;
my $unlogged_dblist = '';
if (exists $OVERALL_STATS{'cluster'}{unlogged_db}) {
$unlogged_dblist = ' (' . join(', ', @{$OVERALL_STATS{'cluster'}{unlogged_db}}) . ')';
}
$OVERALL_STATS{'cluster'}{invalid_indexes} ||= 0;
my $invalid_dblist = '';
if (exists $OVERALL_STATS{'cluster'}{invalid_indexes_db}) {
$invalid_dblist = ' (' . join(', ', @{$OVERALL_STATS{'cluster'}{invalid_indexes_db}}) . ')';
}
$OVERALL_STATS{'cluster'}{hash_indexes} ||= 0;
my $hash_dblist = '';
if (exists $OVERALL_STATS{'cluster'}{hash_indexes_db}) {
$hash_dblist = ' (' . join(', ', @{$OVERALL_STATS{'cluster'}{hash_indexes_db}}) . ')';
}
# On version prior to 9.3 this is not applicable
$OVERALL_STATS{'cluster'}{'data_checksums'} ||= 'N/A';
print <
EOF
}
# System statistics panel, only when sar data collection is enabled
if (!$DISABLE_SAR) {
print <
EOF
}
print <
EOF
# PostgreSQL version / uptime panel
if (exists $sysinfo{PGVERSION}) {
my $cache_info = '';
$cache_info = "- Cache was last built on $sysinfo{CACHE}{last_run}
" if (exists $sysinfo{CACHE});
my $uptime = '';
$uptime = "- Up since $sysinfo{PGVERSION}{'uptime'}
" if ($sysinfo{PGVERSION}{'uptime'});
print <
EOF
}
# Archiver status panel (last archived/failed WAL, stats reset)
if (exists $OVERALL_STATS{'archiver'}) {
my $archiver_infos = '';
if (exists $OVERALL_STATS{'archiver'}{last_archived_wal}) {
$archiver_infos = qq{
- $OVERALL_STATS{'archiver'}{last_archived_wal} Last archived wal
};
$archiver_infos .= qq{
- $OVERALL_STATS{'archiver'}{last_archived_time} Last archived time
};
}
if (exists $OVERALL_STATS{'archiver'}{last_failed_wal}) {
$archiver_infos .= qq{
- $OVERALL_STATS{'archiver'}{last_failed_wal} Last failed wal
};
$archiver_infos .= qq{
- $OVERALL_STATS{'archiver'}{last_failed_time} Last failed time
};
}
if (exists $OVERALL_STATS{'archiver'}{stats_reset}) {
$archiver_infos .= qq{
- $OVERALL_STATS{'archiver'}{stats_reset} Last stats reset
};
}
print <
EOF
}
if ($parts_info) {
print <
EOF
}
# Installation packages and crontab panels, from collected sysinfo
if (exists $sysinfo{INSTALLATION}) {
my $package_infos = join('', @{$sysinfo{INSTALLATION}});
print <
EOF
}
if (exists $sysinfo{CRONTAB}) {
my $cron_infos = join('', @{$sysinfo{CRONTAB}});
print <
EOF
}
print <
EOF
}
# Print the "No data found" page when the user's date selection matches
# no collected statistics; optionally append a link to the last known
# statistics. NOTE(review): the "print <" ... "EOF" span below is a
# heredoc whose marker/body were mangled by extraction; left untouched.
sub wrong_date_selection
{
my $show_last_stats = shift;
if ($show_last_stats) {
$show_last_stats = &last_know_statistics();
}
# Selection ends at the start of the next day (midnight + 86400 s)
my $end_date = timelocal_nocheck(0, 0, 0, $o_day, $o_month - 1, $o_year - 1900) + 86400;
$end_date = strftime("%Y-%m-%d %H:%M:00",localtime($end_date));
print <
No data found
$PROGRAM is not able to find any data relative to your date selection.
- Start: $o_year-$o_month-$o_day $o_hour:00:00
- End : $end_date
Please choose more accurate dates or use time selector menu.
$show_last_stats
EOF
}
# Find the most recent year/month/day directory under $INPUT_DIR that
# holds collected statistics and return a snippet linking to it, or an
# error / "no statistics" message. Dates embedded in the returned link
# use %20 as the URL-encoded space expected by the time selector.
sub last_know_statistics
{
my $start_stats = '';
my $end_stats = '';
# Search the path to last computed statistics
# Search years / months / days / hours directories
if (not opendir(DIR, "$INPUT_DIR")) {
return "FATAL: Can't open directory $INPUT_DIR: $!
\n";
}
# Keep only numeric sub-directories (years)
my @years = grep { /^\d+$/ && -d "$INPUT_DIR/$_" } readdir(DIR);
closedir(DIR);
if ($#years >= 0) {
# Walk newest-first; the "last" statements below stop at the first
# (most recent) year/month/day path found.
foreach my $y (sort { $b <=> $a } @years) {
if (not opendir(DIR, "$INPUT_DIR/$y")) {
return "FATAL: Can't open directory $INPUT_DIR/$y: $!
\n";
}
my @months = grep { /^\d+$/ && -d "$INPUT_DIR/$y/$_" } readdir(DIR);
closedir(DIR);
foreach my $m (sort { $b <=> $a } @months) {
if (not opendir(DIR, "$INPUT_DIR/$y/$m")) {
return "FATAL: Can't open directory $INPUT_DIR/$y/$m: $!
\n";
}
my @days = grep { /^\d+$/ && -d "$INPUT_DIR/$y/$m/$_" } readdir(DIR);
closedir(DIR);
foreach my $d (sort { $b <=> $a } @days) {
if (not opendir(DIR, "$INPUT_DIR/$y/$m/$d")) {
return "FATAL: Can't open directory $INPUT_DIR/$y/$m/$d: $!
\n";
}
# Time frame covers the whole day: midnight to next midnight
$start_stats = "$y-$m-$d\%2000:00";
$end_stats = timelocal_nocheck(0, 0, 0, $d, $m - 1, $y - 1900) + 86400;
$end_stats = strftime("%Y-%m-%d%%2000:00",localtime($end_stats));
# my @hours = grep { /^\d+$/ && -d "$INPUT_DIR/$y/$m/$d/$_" } readdir(DIR);
# closedir(DIR);
# if ($#hours == -1) {
# $start_stats = "$y-$m-$d\%2000:00";
# $end_stats = timelocal_nocheck(0, 0, 0, $d, $m - 1, $y - 1900) + 86400;
# $end_stats = strftime("%Y-%m-%d%%20%H:%M",localtime($end_stats));
# } else {
# foreach my $h (sort { $b <=> $a } @hours) {
# $start_stats = "$y-$m-$d\%20$h:00";
# $end_stats = timegm_nocheck(0, 0, $h, $d, $m - 1, $y - 1900) + 3600;
# $end_stats = strftime("%Y-%m-%d%%20%H:%M",localtime($end_stats));
# last;
# }
# }
last;
}
last;
}
last;
}
}
if ($start_stats) {
return " Last known statistics
\n";
}
return " No statistics found
\n";
}
# Print the static "About" page: program description, license and
# authors. NOTE(review): the "print <" ... "EOF" span is a heredoc
# whose marker/body were mangled by extraction; left untouched.
sub show_about
{
print <
About $PROGRAM
$PROGRAM is a Perl program used to perform a full audit of a PostgreSQL Cluster. It is divided in two parts, a collector used to grab statistics on the PostgreSQL cluster using psql and sysstat, a grapher that will generate all HTML output. It is fully open source and free of charge.
License
$PROGRAM is licenced under the PostgreSQL Licence a liberal Open Source license, similar to the BSD or MIT licenses.
That mean that all parts of the program are open source and free of charge.
This is the case for both, the collector and the grapher programs.
Authors
$PROGRAM is an original development of Gilles Darold.
Some parts of the collector are taken from pgstats a C program writen by Guillaume Lelarge and especially the SQL queries including the compatibility with all PostgreSQL versions.
Btw $PROGRAM grapher is compatible with files generated by pgstats, sar and sadc so you can use it independently to graph those data. Some part of the sar output parser are taken from SysUsage
EOF
}
sub show_sysinfo
{
my $input_dir = shift;
&read_sysinfo($input_dir);
$sysinfo{UPTIME}{'uptime'} = '-' if (!exists $sysinfo{UPTIME}{'uptime'});
my $release_version = '';
if ($sysinfo{RELEASE}{'version'}) {
$release_version = qq{$sysinfo{RELEASE}{'version'} Version};
}
my $sysctl_info = '';
my $hugepage_info = '';
foreach my $k (sort keys %{$sysinfo{SYSTEM}}) {
next if ($k =~ /^kernel.*shm/);
if ($k =~ /transparent_hugepage/) {
my $k2 = $k;
$k2 =~ s/\/sys\/kernel\/mm\/transparent_hugepage\///;
$sysinfo{SYSTEM}{$k} =~ s/.*\[(.*)\].*/$1/;
$hugepage_info .= <$k2 $sysinfo{SYSTEM}{$k}
EOF
} else {
my $lbl = $k;
$lbl =~ s/kernel\.//;
$sysctl_info .= "$lbl $sysinfo{SYSTEM}{$k}\n";
}
}
my $core_info = '';
if (exists $sysinfo{CPU}{'cpu cores'}) {
my $nsockets = $sysinfo{CPU}{'processor'}/($sysinfo{CPU}{'cpu cores'}||1);
$core_info = qq{
$nsockets Sockets
$sysinfo{CPU}{'cpu cores'} Cores per CPU
};
}
if ($hugepage_info) {
$hugepage_info = </sys/kernel/mm/transparent_hugepage/
EOF
}
if (exists $sysinfo{KERNEL}{'kernel'} || exists $sysinfo{CPU}{'processor'}) {
print <
Memory
EOF
}
if (exists $sysinfo{DF} || exists $sysinfo{MOUNT}) {
my @df_infos = ();
push(@df_infos, @{$sysinfo{DF}}) if (exists $sysinfo{DF});
my @mount_infos = ();
push(@mount_infos, @{$sysinfo{MOUNT}}) if (exists $sysinfo{MOUNT});
print <