pax_global_header00006660000000000000000000000064147025047540014522gustar00rootroot0000000000000052 comment=f4c96163451ce937d245144cc9fe964d480c5c53 pgstats-REL1_4_0/000077500000000000000000000000001470250475400137165ustar00rootroot00000000000000pgstats-REL1_4_0/.gitignore000066400000000000000000000000711470250475400157040ustar00rootroot00000000000000/pgcsvstat /pgdisplay /pgreport /pgstat /pgwaitevent *.o pgstats-REL1_4_0/BUGS000066400000000000000000000000001470250475400143670ustar00rootroot00000000000000pgstats-REL1_4_0/License000066400000000000000000000020751470250475400152270ustar00rootroot00000000000000These softwares, pgcsvstat, pgdisplay, pgreport, and pgstat, are released under the terms of the PostgreSQL License. Copyright (c) 2011-2024, Guillaume Lelarge Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. IN NO EVENT SHALL Guillaume Lelarge BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF Guillaume Lelarge HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Guillaume Lelarge SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND Guillaume Lelarge HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
pgstats-REL1_4_0/Makefile000066400000000000000000000014061470250475400153570ustar00rootroot00000000000000PGFILEDESC = "Statistics utilities" PGAPPICON = win32 PROGRAMS = pgcsvstat pgstat pgdisplay pgwaitevent pgreport PGFELIBS = pgfe_connect_utils.o pgfe_query_utils.o pgfe_cancel.o PG_CPPFLAGS = -I$(libpq_srcdir) PG_LIBS = $(libpq_pgport) SCRIPTS_built = pgcsvstat pgstat pgdisplay pgwaitevent pgreport EXTRA_CLEAN = $(addsuffix .o, $(PROGRAMS)) $(PGFELIBS) PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) all: $(PROGRAMS) %: %.o $(WIN32RES) $(CC) $(CFLAGS) $^ $(libpq_pgport) $(LDFLAGS) -L $(pkglibdir) -lpgcommon -lpgport -lm -o $@$(X) pgcsvstat: pgcsvstat.o $(PGFELIBS) pgdisplay: pgdisplay.o $(PGFELIBS) pgstat: pgstat.o $(PGFELIBS) pgwaitevent: pgwaitevent.o $(PGFELIBS) pgreport: pgreport.o $(PGFELIBS) pgreport.o: pgreport_queries.h pgstats-REL1_4_0/README.md000066400000000000000000001056241470250475400152050ustar00rootroot00000000000000README ====== This repository contains the source of a collection of tools. pgstat is a vmstat-like tool for PostgreSQL. pgreport is a reporting tool for PostgreSQL. It tries to get a lot of informations from the metadata and statistics of PostgreSQL. pgwaitevent gathers every wait event for a specific PID, grouping them by queries. pgcsvstat outputs PostgreSQL statistics views into CSV files. The idea is that you can load them on any spreadsheet to get the graphs you want. pgdisplay tries to display a table in an informative way. Still pretty much experimental. They all should be compatible with the latest PostgreSQL release (13 right now), and down to the oldest stable release (9.5 right now). They may also be compatible with much older releases (8.x for most of them). Requirements ------------ To compile these tools, you will need the libpq library (.so), the libpgcommon, and libpgport libraries (.a), the PostgreSQL 14+ header files, and the pg_config tool. 
The header files and the tool are usually available in a -dev package. To use them once compiled, you only need the libpq library. Any version should be fine. Compilation ----------- You only have to do: ``` make make install ``` Usage ----- Use --help to get informations on all command line options for these three tools. More informations on pgstat --------------------------- pgstat is an online command tool that connects to a database and grabs its activity statistics. As PostgreSQL has many statistics, you have a command switch to choose the one you want (-s): * archiver for pg_stat_archiver (9.4+) * bgwriter for pg_stat_bgwriter * checkpointer for pg_stat_checkpointer (17+) * connection for connections by type (9.2+) * database for pg_stat_database * table for pg_stat_all_tables * tableio for pg_statio_all_tables * index for pg_stat_all_indexes * function for pg_stat_user_function * statement for pg_stat_statements * xlog for xlog writes (9.2+) * tempfile for temporary file usage * waitevent for wait events usage (9.6+) * progress_analyze to get the progress on an ANALYZE statement (13+) * progress_basebackup to get the progress on a BASE BACKUP (replication) statement (13+) * progress_cluster to get the progress on a CLUSTER/VACUUM FULL statement (12+) * progress_createindex to get the progress on a CREATE INDEX statement (12+) * progress_vacuum to get the progress on a VACUUM statement (9.6+) * pbpools for pgBouncer pools statistics * pbstats for pgBouncer general statistics It looks a lot like vmstat. You ask it the statistics you want, and the frequency to gather these statistics. Just like this: ``` $ pgstat -s connection - total - active - lockwaiting - idle in transaction - idle - 1546 15 0 0 1531 1544 17 0 0 1527 1544 14 0 0 1530 1546 26 0 0 1520 1543 21 0 0 1522 ``` Yeah, way too many idle connections. Actually, way too many connections. Definitely needs a pooler there. 
This is what happens on a 10-seconds 10-clients pgbench test: ``` $ pgstat -s database 1 - backends - ------ xacts ------ -------------- blocks -------------- -------------- tuples -------------- ------ temp ------ ------- misc -------- commit rollback read hit read_time write_time ret fet ins upd del files bytes conflicts deadlocks 1 224041 17 24768 2803774 0 0 4684398 234716 2105701 16615 113 1 14016512 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 3 0 0 205 0 0 92 92 0 0 0 0 0 0 0 11 20 0 0 500 0 0 1420 184 0 1 0 0 0 0 0 11 69 0 1 4438 0 0 1736 986 68 204 0 0 0 0 0 11 136 0 12 4406 0 0 1767 270 135 405 0 0 0 0 0 11 108 0 0 3434 0 0 1394 214 107 321 0 0 0 0 0 11 96 0 0 3290 0 0 1240 190 95 285 0 0 0 0 0 11 125 0 0 4045 0 0 1620 248 124 372 0 0 0 0 0 11 126 0 0 4222 0 0 1628 250 125 375 0 0 0 0 0 11 111 0 0 3644 0 0 1436 220 110 330 0 0 0 0 0 11 78 0 0 2549 0 0 1918 161 75 225 0 0 0 0 0 11 118 0 0 3933 0 0 1524 234 117 351 0 0 0 0 0 1 130 0 0 4276 0 0 1685 258 129 387 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ``` You clearly see when it starts, when it stops, and what it did during the 10 seconds. You can filter on a specific database with the -f command line option. 
Here is what happens at the tables level: ``` $ pgstat -s table -d b1 1 -- sequential -- ------ index ------ ----------------- tuples -------------------------- -------------- maintenance -------------- scan tuples scan tuples ins upd del hotupd live dead analyze vacuum autovacuum analyze autoanalyze 68553 1467082 264957 266656 7919869 59312 113 57262 4611779 3782 5401 22 10 4 22 3 430 0 0 0 0 0 0 0 0 0 0 0 0 0 3 430 0 0 0 0 0 0 0 0 0 0 0 0 0 231 2351 1116 1222 61 184 0 180 61 124 245 2 0 0 0 431 1750 240 240 120 360 0 358 120 242 480 0 0 0 0 385 1640 220 220 110 330 0 327 110 11 440 0 0 0 0 340 1475 190 190 95 285 0 285 95 189 380 0 0 0 0 398 1651 222 222 111 333 0 331 111 -2 444 0 0 0 0 353 1519 198 198 99 297 0 293 99 200 396 0 0 0 0 335 1453 186 186 93 279 0 274 93 -210 372 0 0 0 0 446 1838 256 256 128 384 0 381 128 104 512 0 0 0 0 425 1739 238 238 119 357 0 354 119 241 476 0 0 0 0 360 1552 204 204 102 306 0 305 102 -10 408 0 0 0 0 386 1629 218 218 109 327 0 325 109 57 436 0 0 0 0 437 1761 242 242 121 363 0 363 121 -292 484 0 0 0 0 373 1563 206 206 103 309 0 305 103 -1 412 0 0 0 0 323 1442 184 184 92 276 0 273 92 188 368 0 0 0 0 412 1706 232 232 116 348 0 346 116 76 464 0 0 0 0 291 1332 164 164 82 246 0 245 82 -216 328 0 0 0 0 189 1013 106 106 53 159 0 158 53 106 212 0 0 0 0 346 1508 196 196 98 294 0 290 98 -18 392 0 0 0 0 304 1376 172 172 86 258 0 258 86 -156 344 0 0 0 0 442 1794 248 248 124 372 0 368 124 -260 496 0 0 0 0 9 1371 157 260 0 13 0 13 -11602 -329 -6053 0 2 0 3 3 430 0 0 0 0 0 0 0 0 0 0 0 0 0 3 430 0 0 0 0 0 0 0 0 0 0 0 0 0 ``` You can also filter by table name with the -f command line switch: ``` $ pgstat -s table -d b1 -f pgbench_history 1 -- sequential -- ------ index ------ ----------------- tuples -------------------------- -------------- maintenance -------------- scan tuples scan tuples ins upd del hotupd live dead analyze vacuum autovacuum analyze autoanalyze 0 0 0 0 21750 0 0 0 2022 0 0 1 0 1 7 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 64 0 0 0 
64 0 64 0 0 0 0 0 0 0 0 122 0 0 0 122 0 122 0 0 0 0 0 0 0 0 106 0 0 0 106 0 106 0 0 0 0 0 0 0 0 99 0 0 0 99 0 99 0 0 0 0 0 0 0 0 88 0 0 0 88 0 88 0 0 0 0 0 0 0 0 116 0 0 0 116 0 116 0 0 0 0 0 0 0 0 99 0 0 0 99 0 99 0 0 0 0 0 0 0 0 61 0 0 0 61 0 61 0 0 0 0 0 0 0 0 42 0 0 0 42 0 42 0 0 0 0 0 0 0 0 106 0 0 0 106 0 106 0 0 0 0 0 0 0 0 55 0 0 0 55 0 55 0 0 0 0 0 0 0 0 121 0 0 0 121 0 121 0 0 0 0 0 0 0 0 68 0 0 0 -1942 0 -1011 0 0 0 1 0 0 0 0 99 0 0 0 99 0 99 0 0 0 0 0 0 0 0 109 0 0 0 109 0 109 0 0 0 0 0 0 0 0 94 0 0 0 94 0 94 0 0 0 0 0 0 0 0 120 0 0 0 120 0 120 0 0 0 0 0 0 0 0 110 0 0 0 110 0 110 0 0 0 0 0 0 0 0 100 0 0 0 100 0 100 0 0 0 0 0 0 0 0 115 0 0 0 115 0 115 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ``` We see that the activity on this table is quite different from what happens to the other tables. There's also a report from the pg_stat_statements extension. It works pretty well: ``` $ pgstat -s statement -d b1 --------- misc ---------- ----------- shared ----------- ----------- local ----------- ----- temp ----- -------- time -------- calls time rows hit read dirty written hit read dirty written read written read written 383843 1756456.50 13236523 9277049 38794 50915 1640 1008844 17703 8850 8850 1711 1711 0.00 0.00 1 0.75 1 0 0 0 0 0 0 0 0 0 0 0.00 0.00 1 0.50 1 0 0 0 0 0 0 0 0 0 0 0.00 0.00 1 0.75 1 0 0 0 0 0 0 0 0 0 0 0.00 0.00 310 2709.88 220 1527 10 63 0 0 0 0 0 0 0 0.00 0.00 797 8555.00 569 3736 10 109 0 0 0 0 0 0 0 0.00 0.00 725 9215.25 519 3610 23 115 0 0 0 0 0 0 0 0.00 0.00 266 7729.38 190 1257 2 43 0 0 0 0 0 0 0 0.00 0.00 831 10196.12 594 3988 11 112 0 0 0 0 0 0 0 0.00 0.00 788 8678.38 563 3803 8 92 0 0 0 0 0 0 0 0.00 0.00 736 9080.62 526 3616 7 89 0 0 0 0 0 0 0 0.00 0.00 792 8395.50 566 3742 11 96 0 0 0 0 0 0 0 0.00 0.00 814 9346.75 582 3985 9 84 0 0 0 0 0 0 0 0.00 0.00 763 8941.12 545 3799 9 84 0 0 0 0 0 0 0 0.00 0.00 728 8543.25 520 3549 8 62 0 0 0 0 0 0 0 0.00 0.00 589 9143.62 421 2812 7 45 0 0 0 0 0 0 0 0.00 0.00 785 
8710.00 561 3788 4 60 0 0 0 0 0 0 0 0.00 0.00 785 9117.25 561 3885 4 60 0 0 0 0 0 0 0 0.00 0.00 785 8397.12 561 3788 1 52 0 0 0 0 0 0 0 0.00 0.00 799 9398.12 571 3925 7 60 0 0 0 0 0 0 0 0.00 0.00 765 9033.88 547 3757 3 43 0 0 0 0 0 0 0 0.00 0.00 805 8663.25 575 3886 6 57 0 0 0 0 0 0 0 0.00 0.00 765 8490.50 547 3661 7 39 0 0 0 0 0 0 0 0.00 0.00 764 8850.00 546 3698 4 41 0 0 0 0 0 0 0 0.00 0.00 396 6706.50 283 1992 1 14 0 0 0 0 0 0 0 0.00 0.00 1 0.38 1 0 0 0 0 0 0 0 0 0 0 0.00 0.00 1 0.62 1 0 0 0 0 0 0 0 0 0 0 0.00 0.00 ``` You can filter a specific statement by its query id. Of course, it first searches for the extension, and complains if it isn't there: ``` $ pgstat -s statement -d b2 pgstat: Cannot find the pg_stat_statements extension. ``` You can filter by group of columns. That only works for the biggest stats, meaning database and statement. For example : ``` $ ./pgstat -s database -S backends,tuples - backends - -------------- tuples -------------- ret fet ins upd del 1 32167008 7201488 24524575 433 1357 1 0 0 0 0 0 1 162 154 0 0 0 1 4 0 0 0 0 1 897 18 0 0 0 1 4 0 0 0 0 $ ./pgstat -s statement -S exec,wal --------- exec ---------- -------------- wal -------------- calls time rows wal_records wal_fpi wal_bytes 96 11.37 96 2 2 6882 1 0.20 1 0 0 0 1 0.16 1 0 0 0 1 0.28 1 0 0 0 ``` One of my customers had a lot of writes on their databases, and I wanted to know how much writes occured in the WAL files. vmstat would only tell me how much writes on all files, but I was only interested in WAL writes. So I added a new report that grabs the current XLOG position, and diff it with the previous XLOG position. 
It gives something like this with a pgbench test: ``` $ ./pgstat -s xlog -------- filename -------- -- location -- ---- bytes ---- 00000001000000000000003E 0/3EC49940 1053071680 00000001000000000000003E 0/3EC49940 0 00000001000000000000003E 0/3EC49940 0 00000001000000000000003E 0/3EC875F8 253112 00000001000000000000003E 0/3ED585C8 856016 00000001000000000000003E 0/3EE36C40 910968 00000001000000000000003E 0/3EEFCC58 811032 00000001000000000000003E 0/3EFAB9D0 716152 00000001000000000000003F 0/3F06A3C0 780784 00000001000000000000003F 0/3F0E79E0 513568 00000001000000000000003F 0/3F1354E0 318208 00000001000000000000003F 0/3F1F6218 789816 00000001000000000000003F 0/3F2BCE00 814056 00000001000000000000003F 0/3F323240 418880 00000001000000000000003F 0/3F323240 0 00000001000000000000003F 0/3F323240 0 ``` That's not big numbers, so it's easy to find it writes at 253K/s, but if the number were bigger, it might get hard to read. One of my co-worker, Julien Rouhaud, added a human readable option: ``` $ ./pgstat -s xlog -H -------- filename -------- -- location -- ---- bytes ---- 00000001000000000000003F 0/3F32EDC0 1011 MB 00000001000000000000003F 0/3F32EDC0 0 bytes 00000001000000000000003F 0/3F32EDC0 0 bytes 00000001000000000000003F 0/3F3ABC78 500 kB 00000001000000000000003F 0/3F491C10 920 kB 00000001000000000000003F 0/3F568548 858 kB 00000001000000000000003F 0/3F634748 817 kB 00000001000000000000003F 0/3F6F4378 767 kB 00000001000000000000003F 0/3F7A56D8 709 kB 00000001000000000000003F 0/3F8413D0 623 kB 00000001000000000000003F 0/3F8D7590 600 kB 00000001000000000000003F 0/3F970160 611 kB 00000001000000000000003F 0/3F9F2840 522 kB 00000001000000000000003F 0/3FA1FD88 181 kB 00000001000000000000003F 0/3FA1FD88 0 bytes 00000001000000000000003F 0/3FA1FD88 0 bytes 00000001000000000000003F 0/3FA1FD88 0 bytes ``` That's indeed much more readable if you ask me. Another customer wanted to know how many temporary files were written, and their sizes. 
Of course, you can get that with the pg_stat_database view, but it only gets added when the query is done. We wanted to know when the query is executed. So I added another report: ``` $ ./pgstat -s tempfile --- size --- --- count --- 0 0 0 0 13082624 1 34979840 1 56016896 1 56016896 1 56016896 1 0 0 0 0 ``` You see the file being stored. Since release 9.6, there are some very interesting progress views. Here is an example that shows the VACUUM progress on a table. We can see the progress while it goes through different phases. ``` $ ./pgstat -s progressvacuum --------------------- object --------------------- ---------- phase ---------- ---------------- stats --------------- database relation size %scan %vacuum #index %dead tuple bdd_alfresco alf_node_properties 254 GB scanning heap 39.95 0.00 0 0.21 bdd_alfresco alf_node_properties 254 GB scanning heap 39.98 0.00 0 0.21 bdd_alfresco alf_node_properties 254 GB scanning heap 40.01 0.00 0 0.21 bdd_alfresco alf_prop_unique_ctx 1792 kB vacuuming indexes 100.00 0.00 0 0.00 bdd_alfresco alf_node_properties 254 GB scanning heap 40.02 0.00 0 0.21 bdd_alfresco alf_prop_unique_ctx 1792 kB vacuuming indexes 100.00 0.00 0 0.00 bdd_alfresco alf_node_properties 254 GB scanning heap 40.07 0.00 0 0.21 bdd_alfresco alf_node_properties 254 GB scanning heap 40.10 0.00 0 0.21 bdd_alfresco alf_node_properties 254 GB scanning heap 40.13 0.00 0 0.21 bdd_alfresco alf_node_properties 254 GB scanning heap 40.15 0.00 0 0.21 bdd_alfresco alf_node_properties 254 GB scanning heap 40.17 0.00 0 0.21 ... 
bdd_alfresco alf_node_properties 254 GB scanning heap 47.10 0.00 0 0.47 bdd_alfresco alf_prop_unique_ctx 1792 kB cleaning up indexes 100.00 100.00 0 0.00 bdd_alfresco alf_node_properties 254 GB scanning heap 47.13 0.00 0 0.47 bdd_alfresco alf_prop_unique_ctx 1792 kB cleaning up indexes 100.00 100.00 0 0.00 bdd_alfresco alf_node_properties 254 GB scanning heap 47.16 0.00 0 0.47 bdd_alfresco alf_prop_unique_ctx 1792 kB cleaning up indexes 100.00 100.00 0 0.00 bdd_alfresco alf_node_properties 254 GB scanning heap 47.18 0.00 0 0.48 bdd_alfresco alf_node_properties 254 GB scanning heap 47.21 0.00 0 0.48 ... bdd_alfresco alf_node_properties 255 GB vacuuming indexes 100.00 0.00 0 30.18 bdd_alfresco alf_node_properties 255 GB vacuuming indexes 100.00 0.00 0 30.18 bdd_alfresco alf_node_properties 255 GB vacuuming indexes 100.00 0.00 0 30.18 bdd_alfresco alf_node_properties 255 GB vacuuming indexes 100.00 0.00 0 30.18 ... ``` Information shown depends on the progress views. More informations on pgwaitevent -------------------------------- The pgwaitevent tool waits the execution of a query on a specific PID backend. It then gathers all the waiting events, and sums them up. At the end of the query, it prints a table with the waiting events, their occurences, and percentage. 
Here is an exemple of a session with this tool: ``` $ ./pgwaitevent -i 0.1 548292 Tracing wait events for PID 548292, sampling at 0.100s New query: truncate t1; Query duration: 00:00:00.324883 Trace duration: 00:00:00.313353 ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ [Running] │ │ 3 │ 100.00 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ New query: insert into t1 select generate_series(1, 1000000); Query duration: 00:00:02.077609 Trace duration: 00:00:02.038534 ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ WALSync │ IO │ 12 │ 60.00 │ │ [Running] │ │ 5 │ 25.00 │ │ WALWriteLock │ LWLock │ 3 │ 15.00 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ New query: select * from t1 where id<5000; Query duration: 00:00:00.207713 Trace duration: 00:00:00.108132 ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ [Running] │ │ 1 │ 100.00 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ New query: select * from t1 where id<500000; Query duration: 00:00:00.357929 Trace duration: 00:00:00.312559 ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ [Running] │ │ 3 │ 100.00 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ New query: insert into t1 select generate_series(1, 1000000); Query duration: 00:00:01.908082 Trace duration: 00:00:01.8308 
┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ WALWriteLock │ LWLock │ 6 │ 33.33 │ │ [Running] │ │ 5 │ 27.78 │ │ WALSync │ IO │ 4 │ 22.22 │ │ WALWrite │ IO │ 2 │ 11.11 │ │ DataFileExtend │ IO │ 1 │ 5.56 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ New query: insert into t1 select generate_series(1, 1000000); Query duration: 00:00:01.602976 Trace duration: 00:00:01.524851 ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ WALSync │ IO │ 7 │ 46.67 │ │ [Running] │ │ 4 │ 26.67 │ │ WALWriteLock │ LWLock │ 3 │ 20.00 │ │ WALWrite │ IO │ 1 │ 6.67 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ New query: insert into t1 select generate_series(1, 1000000); Query duration: 00:00:01.638675 Trace duration: 00:00:01.630696 ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ [Running] │ │ 8 │ 50.00 │ │ WALWriteLock │ LWLock │ 4 │ 25.00 │ │ WALSync │ IO │ 4 │ 25.00 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ New query: select * from t1 where id<500000; Query duration: 00:00:00.893073 Trace duration: 00:00:00.819036 ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ [Running] │ │ 8 │ 100.00 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ New query: create index on t1(id); Query duration: 00:00:04.051142 Trace duration: 00:00:03.955806 
┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ [Running] │ │ 15 │ 38.46 │ │ WALSync │ IO │ 15 │ 38.46 │ │ DataFileImmediateSync │ IO │ 5 │ 12.82 │ │ WALWriteLock │ LWLock │ 4 │ 10.26 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ No more session with PID 548292, exiting... ``` It sleeps 100msec before checking if a new query is being executed. It checks waiting events on an interval set up with the `-i` command line option. By default, it's 1 second (which is a bit on the high end). Starting with PostgreSQL 13, pgwaitevent is able to include leader and workers. You need the -g command line option for this. Ideas ----- * pgstat * pg_stat_archiver: display the current wal and the last archived * pg_stat_archiver: display the duration since the last archived wal * pg_stat_X_tables: display the duration since the last vacuum and analyze * sum the number of archived wal files * add a report for pg_stat_database_conflicts * add a report for pg_stat_io * add a report for pg_stat_recovery_prefetch * add a report for pg_stat_replication * add a report for pg_stat_replication_slots * add a report for pg_stat_subscription * add a report for pg_stat_subscription_stats * add a report for pg_stat_wal_receiver * pgcsvstat * check for anything missing in this tool * pgreport * get data stats (idea from Christophe Courtois) pgstats-REL1_4_0/pgcsvstat.c000066400000000000000000001041321470250475400161010ustar00rootroot00000000000000/* * pgcsvstat, a PostgreSQL app to gather statistical informations * from a PostgreSQL database. * * This software is released under the PostgreSQL Licence. * * Guillaume Lelarge, guillaume@lelarge.info, 2011-2024. 
* * pgstats/pgcsvstat.c */ /* * System headers */ #include /* * PostgreSQL headers */ #include "postgres_fe.h" #include "common/logging.h" #include "fe_utils/cancel.h" #include "fe_utils/connect_utils.h" extern char *optarg; /* * Defines */ #define PGCSVSTAT_VERSION "1.4.0" /* these are the opts structures for command line params */ struct options { bool quiet; bool nodb; char *directory; char *dbname; char *hostname; char *port; char *username; int major; int minor; }; /* global variables */ struct options *opts; PGconn *conn; /* function prototypes */ static void help(const char *progname); void get_opts(int, char **); void *myalloc(size_t size); char *mystrdup(const char *str); int sql_exec(const char *sql, const char *filename, bool quiet); void sql_exec_dump_pgstatactivity(void); void sql_exec_dump_pgstatarchiver(void); void sql_exec_dump_pgstatbgwriter(void); void sql_exec_dump_pgstatcheckpointer(void); void sql_exec_dump_pgstatdatabase(void); void sql_exec_dump_pgstatdatabaseconflicts(void); void sql_exec_dump_pgstatreplication(void); void sql_exec_dump_pgstatreplicationslots(void); void sql_exec_dump_pgstatslru(void); void sql_exec_dump_pgstatsubscription(void); void sql_exec_dump_pgstatwal(void); void sql_exec_dump_pgstatwalreceiver(void); void sql_exec_dump_pgstatalltables(void); void sql_exec_dump_pgstatallindexes(void); void sql_exec_dump_pgstatioalltables(void); void sql_exec_dump_pgstatioallindexes(void); void sql_exec_dump_pgstatioallsequences(void); void sql_exec_dump_pgstatuserfunctions(void); void sql_exec_dump_pgclass_size(void); void sql_exec_dump_pgstatstatements(void); void sql_exec_dump_xlog_stat(void); void sql_exec_dump_pgstatprogressanalyze(void); void sql_exec_dump_pgstatprogressbasebackup(void); void sql_exec_dump_pgstatprogresscluster(void); void sql_exec_dump_pgstatprogresscopy(void); void sql_exec_dump_pgstatprogresscreateindex(void); void sql_exec_dump_pgstatprogressvacuum(void); void fetch_version(void); bool check_superuser(void); 
bool backend_minimum_version(int major, int minor); bool backend_has_pgstatstatements(void); /* function to parse command line options and check for some usage errors. */ void get_opts(int argc, char **argv) { int c; const char *progname; progname = get_progname(argv[0]); /* set the defaults */ opts->quiet = false; opts->nodb = false; opts->directory = NULL; opts->dbname = NULL; opts->hostname = NULL; opts->port = NULL; opts->username = NULL; if (argc > 1) { if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) { help(progname); exit(0); } if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) { puts("pgcsvstats " PGCSVSTAT_VERSION " (compiled with PostgreSQL " PG_VERSION ")"); exit(0); } } /* get opts */ while ((c = getopt(argc, argv, "h:p:U:d:D:q")) != -1) { switch (c) { /* specify the database */ case 'd': opts->dbname = mystrdup(optarg); break; /* specify the directory */ case 'D': opts->directory = mystrdup(optarg); break; /* don't show headers */ case 'q': opts->quiet = true; break; /* host to connect to */ case 'h': opts->hostname = mystrdup(optarg); break; /* port to connect to on remote host */ case 'p': opts->port = mystrdup(optarg); break; /* username */ case 'U': opts->username = mystrdup(optarg); break; default: pg_log_error("Try \"%s --help\" for more information.\n", progname); exit(EXIT_FAILURE); } } } static void help(const char *progname) { printf("%s gathers statistics from a PostgreSQL database.\n\n" "Usage:\n" " %s [OPTIONS]...\n" "\nGeneral options:\n" " -d DBNAME database to connect to\n" " -D DIRECTORY directory for stats files (defaults to current)\n" " -q quiet\n" " --help show this help, then exit\n" " --version output version information, then exit\n" "\nConnection options:\n" " -h HOSTNAME database server host or socket directory\n" " -p PORT database server port number\n" " -U USER connect as specified database user\n" "\nIt creates CSV files for each report.\n\n" "Report bugs to .\n", progname, progname); } 
void * myalloc(size_t size) { void *ptr = malloc(size); if (!ptr) { pg_log_error("out of memory (myalloc)"); exit(EXIT_FAILURE); } return ptr; } char * mystrdup(const char *str) { char *result = strdup(str); if (!result) { pg_log_error("out of memory (mystrdup)"); exit(EXIT_FAILURE); } return result; } /* * Actual code to extrac statistics from the database * and to store the output data in CSV files. */ int sql_exec(const char *query, const char* filename, bool quiet) { PGresult *res; FILE *fdcsv; struct stat st; int nfields; int nrows; int i, j; int size; /* open the csv file */ fdcsv = fopen(filename, "a"); if (!fdcsv) { pg_log_error("Cannot open file %s, errno %d\n", filename, errno); PQfinish(conn); exit(EXIT_FAILURE); } /* get size of file */ stat(filename, &st); size = st.st_size; /* make the call */ res = PQexec(conn, query); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_error("query failed: %s\n", PQerrorMessage(conn)); pg_log_info("query was: %s\n", query); PQclear(res); PQfinish(conn); exit(-1); } /* get the number of fields */ nrows = PQntuples(res); nfields = PQnfields(res); /* print a header */ if (!quiet && size == 0) { for (j = 0; j < nfields; j++) { fprintf(fdcsv, "%s", PQfname(res, j)); if (j < nfields - 1) fprintf(fdcsv, ";"); } fprintf(fdcsv, "\n"); } /* for each row, dump the information */ for (i = 0; i < nrows; i++) { for (j = 0; j < nfields; j++) { fprintf(fdcsv, "%s", PQgetvalue(res, i, j)); if (j < nfields - 1) fprintf(fdcsv, ";"); } fprintf(fdcsv, "\n"); } /* cleanup */ PQclear(res); /* close the csv file */ fclose(fdcsv); return 0; } /* * Dump all activities. 
*/ void sql_exec_dump_pgstatactivity() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), datid, datname, %s, %s" "usesysid, usename, %s%s%s%s%s" "date_trunc('seconds', query_start) AS query_start, " "%s%s%s%s%s%s%s state " "FROM pg_stat_activity " "ORDER BY %s", backend_minimum_version(9, 2) ? "pid" : "procpid", backend_minimum_version(13, 0) ? "leader_pid, " : "", backend_minimum_version(9, 0) ? "application_name, " : "", backend_minimum_version(8, 1) ? "client_addr, " : "", backend_minimum_version(9, 1) ? "client_hostname, " : "", backend_minimum_version(8, 1) ? "client_port, date_trunc('seconds', backend_start) AS backend_start, " : "", backend_minimum_version(8, 3) ? "date_trunc('seconds', xact_start) AS xact_start, " : "", backend_minimum_version(9, 2) ? "state_change, " : "", backend_minimum_version(9, 6) ? "wait_event_type, wait_event, " : backend_minimum_version(8, 2) ? "waiting, " : "", backend_minimum_version(9, 4) ? "backend_xid, " : "", backend_minimum_version(9, 4) ? "backend_xmin, " : "", backend_minimum_version(14, 0) ? "query_id, " : "", backend_minimum_version(9, 2) ? "query, " : "current_query,", backend_minimum_version(10, 0) ? "backend_type, " : "", backend_minimum_version(9, 2) ? "pid" : "procpid"); // the last one is for the ORDER BY snprintf(filename, sizeof(filename), "%s/pg_stat_activity.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all bgwriter stats. 
*/ void sql_exec_dump_pgstatbgwriter() { char query[1024]; char filename[1024]; if (backend_minimum_version(17, 0)) { snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), buffers_clean, " "maxwritten_clean, buffers_alloc, " "date_trunc('seconds', stats_reset) AS stats_reset " "FROM pg_stat_bgwriter "); } else { snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), checkpoints_timed, " "checkpoints_req, %sbuffers_checkpoint, buffers_clean, " "maxwritten_clean, buffers_backend, %sbuffers_alloc%s " "FROM pg_stat_bgwriter ", backend_minimum_version(9, 2) ? "checkpoint_write_time, checkpoint_sync_time, " : "", backend_minimum_version(9, 1) ? "buffers_backend_fsync, " : "", backend_minimum_version(9, 1) ? ", date_trunc('seconds', stats_reset) AS stats_reset " : ""); } snprintf(filename, sizeof(filename), "%s/pg_stat_bgwriter.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all checkpointer stats. */ void sql_exec_dump_pgstatcheckpointer() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), num_timed, num_requested, " "restartpoints_timed, restartpoints_req, restartpoints_done, " "write_time, sync_time, buffers_written, " "date_trunc('seconds', stats_reset) AS stats_reset " "FROM pg_stat_checkpointer "); snprintf(filename, sizeof(filename), "%s/pg_stat_checkpointer.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all archiver stats. 
*/ void sql_exec_dump_pgstatarchiver() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), archived_count, " "last_archived_wal, date_trunc('seconds', last_archived_time) AS last_archived_time, " "failed_count, " "last_failed_wal, date_trunc('seconds', last_failed_time) AS last_failed_time, " "date_trunc('seconds', stats_reset) AS stats_reset " "FROM pg_stat_archiver "); snprintf(filename, sizeof(filename), "%s/pg_stat_archiver.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all databases stats. */ void sql_exec_dump_pgstatdatabase() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), datid, datname, " "numbackends, xact_commit, xact_rollback, blks_read, blks_hit" "%s%s%s%s%s " "FROM pg_stat_database " "ORDER BY datname", backend_minimum_version(8, 3) ? ", tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted" : "", backend_minimum_version(9, 1) ? ", conflicts, date_trunc('seconds', stats_reset) AS stats_reset" : "", backend_minimum_version(9, 2) ? ", temp_files, temp_bytes, deadlocks, blk_read_time, blk_write_time" : "", backend_minimum_version(12, 0) ? ", checksum_failures, checksum_last_failure" : "", backend_minimum_version(14, 0) ? ", session_time, active_time, idle_in_transaction_time, sessions, sessions_abandoned, sessions_fatal, sessions_killed" : ""); snprintf(filename, sizeof(filename), "%s/pg_stat_database.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all databases conflicts stats. */ void sql_exec_dump_pgstatdatabaseconflicts() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), * " "FROM pg_stat_database_conflicts " "ORDER BY datname"); snprintf(filename, sizeof(filename), "%s/pg_stat_database_conflicts.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all replication stats. 
*/ void sql_exec_dump_pgstatreplication() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), %s, usesysid, usename, " "application_name, client_addr, client_hostname, client_port, " "date_trunc('seconds', backend_start) AS backend_start, %sstate, " "%s AS master_location, %s%s" "sync_priority, " "sync_state%s " "FROM pg_stat_replication " "ORDER BY application_name", backend_minimum_version(9, 2) ? "pid" : "procpid", backend_minimum_version(9, 4) ? "backend_xmin, " : "", backend_minimum_version(10, 0) ? "pg_current_wal_lsn()" : "pg_current_xlog_location()", backend_minimum_version(10, 0) ? "sent_lsn, write_lsn, flush_lsn, replay_lsn, " : "sent_location, write_location, flush_location, replay_location, ", backend_minimum_version(10, 0) ? "write_lag, flush_lag, replay_lag, " : "", backend_minimum_version(12, 0) ? ", reply_time" : ""); snprintf(filename, sizeof(filename), "%s/pg_stat_replication.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all replication slots stats. */ void sql_exec_dump_pgstatreplicationslots() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), slot_name, " "spill_txns, spill_count, spill_bytes, " "stream_txns, stream_count, stream_bytes, " "total_txns, total_bytes, " "date_trunc('seconds', stats_reset) AS stats_reset " "FROM pg_stat_replication_slots " "ORDER BY slot_name"); snprintf(filename, sizeof(filename), "%s/pg_stat_replication_slots.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all SLRU stats. 
*/ void sql_exec_dump_pgstatslru() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), name, " "blks_zeroed, blks_hit, blks_read, blks_written, blks_exists, " "flushes, truncates, " "date_trunc('seconds', stats_reset) AS stats_reset " "FROM pg_stat_slru " "ORDER BY name"); snprintf(filename, sizeof(filename), "%s/pg_stat_slru.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all subscriptions stats. */ void sql_exec_dump_pgstatsubscription() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), subid, subname%s, " "pid%s, relid, relname, received_lsn, " "date_trunc('seconds', last_msg_send_time) AS last_msg_send_time, " "date_trunc('seconds', last_msg_receipt_time) AS last_msg_receipt_time, " "latest_end_lsn, date_trunc('seconds', latest_end_time) AS latest_end_time " "FROM pg_stat_subscription s " "LEFT JOIN pg_class c ON c.oid=s.relid " "ORDER BY subid", backend_minimum_version(17, 0) ? ", worker_type" : "", backend_minimum_version(16, 0) ? ", leader_pid" : ""); snprintf(filename, sizeof(filename), "%s/pg_stat_subscription.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all WAL stats. */ void sql_exec_dump_pgstatwal() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), " "wal_records, wal_fpi, wal_bytes, wal_buffers_full, wal_write, " "wal_sync, wal_write_time, wal_sync_time, " "date_trunc('seconds', stats_reset) AS stats_reset " "FROM pg_stat_wal"); snprintf(filename, sizeof(filename), "%s/pg_stat_wal.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all wal receiver stats. 
*/ void sql_exec_dump_pgstatwalreceiver() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT pid, status, receive_start_lsn, receive_start_tli, " "written_lsn, flushed_lsn, received_tli, " "date_trunc('seconds', last_msg_send_time) last_msg_send_time, " "date_trunc('seconds', last_msg_receipt_time) last_msg_receipt_time, " "latest_end_lsn, date_trunc('seconds', latest_end_time) latest_end_time, " "slot_name, sender_host, sender_port, conninfo " "FROM pg_stat_wal_receiver " "ORDER BY pid"); snprintf(filename, sizeof(filename), "%s/pg_stat_wal_receiver.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all tables stats. */ void sql_exec_dump_pgstatalltables() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), relid, schemaname, relname, " "seq_scan%s, seq_tup_read, idx_scan%s, idx_tup_fetch, " "n_tup_ins, n_tup_upd, n_tup_del" "%s%s%s%s%s%s%s " "FROM pg_stat_all_tables " "WHERE schemaname <> 'information_schema' " "ORDER BY schemaname, relname", backend_minimum_version(16, 0) ? ", date_trunc('seconds', last_seq_scan) AS last_seq_scan" : "", backend_minimum_version(16, 0) ? ", date_trunc('seconds', last_idx_scan) AS last_idx_scan" : "", backend_minimum_version(8, 3) ? ", n_tup_hot_upd" : "", backend_minimum_version(16, 0) ? ", n_tup_newpage_upd" : "", backend_minimum_version(8, 3) ? ", n_live_tup, n_dead_tup" : "", backend_minimum_version(9, 4) ? ", n_mod_since_analyze" : "", backend_minimum_version(13, 0) ? ", n_ins_since_vacuum" : "", backend_minimum_version(8, 2) ? ", date_trunc('seconds', last_vacuum) AS last_vacuum, date_trunc('seconds', last_autovacuum) AS last_autovacuum, date_trunc('seconds',last_analyze) AS last_analyze, date_trunc('seconds',last_autoanalyze) AS last_autoanalyze" : "", backend_minimum_version(9, 1) ? 
", vacuum_count, autovacuum_count, analyze_count, autoanalyze_count" : ""); snprintf(filename, sizeof(filename), "%s/pg_stat_all_tables.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all indexes stats. */ void sql_exec_dump_pgstatallindexes() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), " "relid, indexrelid, schemaname, relname, indexrelname, " "idx_scan%s, idx_tup_read, idx_tup_fetch " "FROM pg_stat_all_indexes " "WHERE schemaname <> 'information_schema' " "ORDER BY schemaname, relname", backend_minimum_version(16, 0) ? ", date_trunc('seconds', last_idx_scan) AS last_idx_scan" : "" ); snprintf(filename, sizeof(filename), "%s/pg_stat_all_indexes.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all tables IO stats. */ void sql_exec_dump_pgstatioalltables() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), * " "FROM pg_statio_all_tables " "WHERE schemaname <> 'information_schema' " "ORDER BY schemaname, relname"); snprintf(filename, sizeof(filename), "%s/pg_statio_all_tables.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all indexes IO stats. */ void sql_exec_dump_pgstatioallindexes() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), * " "FROM pg_statio_all_indexes " "WHERE schemaname <> 'information_schema' " "ORDER BY schemaname, relname"); snprintf(filename, sizeof(filename), "%s/pg_statio_all_indexes.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all sequences IO stats. 
*/ void sql_exec_dump_pgstatioallsequences() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), * " "FROM pg_statio_all_sequences " "WHERE schemaname <> 'information_schema' " "ORDER BY schemaname, relname"); snprintf(filename, sizeof(filename), "%s/pg_statio_all_sequences.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all functions stats. */ void sql_exec_dump_pgstatuserfunctions() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), * " "FROM pg_stat_user_functions " "WHERE schemaname <> 'information_schema' " "ORDER BY schemaname, funcname"); snprintf(filename, sizeof(filename), "%s/pg_stat_user_functions.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all size class stats. */ void sql_exec_dump_pgclass_size() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), n.nspname, c.relname, c.relkind, " "c.reltuples, c.relpages%s%s " "FROM pg_class c " "JOIN pg_namespace n ON n.oid=c.relnamespace " "WHERE n.nspname <> 'information_schema' " "ORDER BY n.nspname, c.relname", backend_minimum_version(9, 2) ? ", c.relallvisible" : "", backend_minimum_version(8, 1) ? ", pg_relation_size(c.oid)" : ""); snprintf(filename, sizeof(filename), "%s/pg_class_size.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all statements stats. 
* to be fixed wrt v14 */ void sql_exec_dump_pgstatstatements() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), r.rolname, d.datname, " "%sregexp_replace(query, E'\n', ' ', 'g') as query, %scalls, %s, rows, " "shared_blks_hit, shared_blks_read, shared_blks_dirtied, shared_blks_written, " "local_blks_hit, local_blks_read, local_blks_dirtied, local_blks_written, " "temp_blks_read, temp_blks_written%s%s%s%s%s%s " "FROM pg_stat_statements q " "LEFT JOIN pg_database d ON q.dbid=d.oid " "LEFT JOIN pg_roles r ON q.userid=r.oid " "ORDER BY r.rolname, d.datname", backend_minimum_version(14, 0) ? "toplevel, queryid, " : "", backend_minimum_version(13, 0) ? "plans, total_plan_time, min_plan_time, max_plan_time, mean_plan_time, stddev_plan_time, " : "", backend_minimum_version(13, 0) ? "total_exec_time, min_exec_time, max_exec_time, mean_exec_time, stddev_exec_time" : "total_time", backend_minimum_version(17, 0) ? ", shared_blk_read_time, shared_blk_write_time, local_blk_read_time, local_blk_write_time" : ", blk_read_time, blk_write_time", backend_minimum_version(15, 0) ? ", temp_blk_read_time, temp_blk_write_time" : "", backend_minimum_version(13, 0) ? ", wal_records, wal_fpi, wal_bytes" : "", backend_minimum_version(15, 0) ? ", jit_functions, jit_generation_time, jit_inlining_count, jit_inlining_time, jit_optimization_count, jit_optimization_time, jit_emission_count, jit_emission_time" : "", backend_minimum_version(17, 0) ? ", date_trunc('seconds', stats_since) AS stats_since " : "", backend_minimum_version(17, 0) ? ", date_trunc('seconds', minmax_stats_since) AS minmax_stats_since " : ""); snprintf(filename, sizeof(filename), "%s/pg_stat_statements.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all xlog stats. */ void sql_exec_dump_xlog_stat() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), backend_minimum_version(10, 0) ? 
"SELECT date_trunc('seconds', now()), pg_walfile_name(pg_current_wal_lsn())=pg_ls_dir AS current, pg_ls_dir AS filename, " "(SELECT modification FROM pg_stat_file('pg_wal/'||pg_ls_dir)) AS modification_timestamp " "FROM pg_ls_dir('pg_wal') " "WHERE pg_ls_dir ~ E'^[0-9A-F]{24}' " "ORDER BY pg_ls_dir" : "SELECT date_trunc('seconds', now()), pg_xlogfile_name(pg_current_xlog_location())=pg_ls_dir AS current, pg_ls_dir AS filename, " "(SELECT modification FROM pg_stat_file('pg_xlog/'||pg_ls_dir)) AS modification_timestamp " "FROM pg_ls_dir('pg_xlog') " "WHERE pg_ls_dir ~ E'^[0-9A-F]{24}' " "ORDER BY pg_ls_dir"); snprintf(filename, sizeof(filename), "%s/pg_xlog_stat.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump ANALYZE progress */ void sql_exec_dump_pgstatprogressanalyze() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), pid, datid, datname, " "relid, relid::regclass relname, phase, sample_blks_total, " "sample_blks_scanned, ext_stats_total, ext_stats_computed, " "child_tables_total, child_tables_done, current_child_table_relid, " "current_child_table_relid::regclass current_child_table_relname " "FROM pg_stat_progress_analyze " "ORDER BY pid"); snprintf(filename, sizeof(filename), "%s/pg_stat_progress_analyze.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump BASE BACKUP progress */ void sql_exec_dump_pgstatprogressbasebackup() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), pid, phase, " "backup_total, backup_streamed, " "tablespaces_total, tablespaces_streamed " "FROM pg_stat_progress_basebackup " "ORDER BY pid"); snprintf(filename, sizeof(filename), "%s/pg_stat_progress_basebackup.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump CLUSTER progress */ void sql_exec_dump_pgstatprogresscluster() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), 
"SELECT date_trunc('seconds', now()), pid, datid, datname, " "relid, relid::regclass relname, command, phase, " "cluster_index_relid, cluster_index_relid::regclass cluster_index_relname, " "heap_tuples_scanned, heap_tuples_written, heap_blks_total, " "heap_blks_scanned, index_rebuild_count " "FROM pg_stat_progress_cluster " "ORDER BY pid"); snprintf(filename, sizeof(filename), "%s/pg_stat_progress_cluster.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump COPY progress */ void sql_exec_dump_pgstatprogresscopy() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), pid, datid, datname, " "relid, relid::regclass relname, command, type, " "bytes_processed, bytes_total, " "tuples_processed, tuples_excluded%s " "FROM pg_stat_progress_copy " "ORDER BY pid", backend_minimum_version(17, 0) ? ", tuples_excluded" : ""); snprintf(filename, sizeof(filename), "%s/pg_stat_progress_copy.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump CREATE INDEX progress */ void sql_exec_dump_pgstatprogresscreateindex() { char query[1024]; char filename[1024]; snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), pid, datid, datname, " "relid, relid::regclass relname, index_relid, index_relid::regclass index_relname, " "command, phase, lockers_total, lockers_done, current_locker_pid, " "blocks_total, blocks_done, tuples_total, tuples_done, " "partitions_total, partitions_done " "FROM pg_stat_progress_create_index " "ORDER BY pid"); snprintf(filename, sizeof(filename), "%s/pg_stat_progress_create_index.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump VACUUM progress */ void sql_exec_dump_pgstatprogressvacuum() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), pid, datid, datname, " "relid, relid::regclass relname, phase, 
" "heap_blks_total, heap_blks_scanned, heap_blks_vacuumed, " "index_vacuum_count, %s, %s " "FROM pg_stat_progress_vacuum " "ORDER BY pid", backend_minimum_version(17, 0) ? "max_dead_tuple_bytes" : "max_dead_tuples", backend_minimum_version(17, 0) ? "dead_tuple_bytes" : "num_dead_tuples"); snprintf(filename, sizeof(filename), "%s/pg_stat_progress_vacuum.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Fetch PostgreSQL major and minor numbers */ void fetch_version() { char query[1024]; PGresult *res; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT version()"); /* make the call */ res = PQexec(conn, query); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_error("query failed: %s\n", PQerrorMessage(conn)); pg_log_info("query was: %s\n", query); PQclear(res); PQfinish(conn); exit(-1); } /* get the only row, and parse it to get major and minor numbers */ sscanf(PQgetvalue(res, 0, 0), "%*s %d.%d", &(opts->major), &(opts->minor)); /* print version */ if (!opts->quiet) printf("Detected release: %d.%d\n", opts->major, opts->minor); /* cleanup */ PQclear(res); } /* * Check if user has the superuser attribute */ bool check_superuser() { PGresult *res; char sql[1024]; bool is_superuser = false; /* get the oid and database name from the system pg_database table */ snprintf(sql, sizeof(sql), "SELECT rolsuper FROM pg_roles WHERE rolname=current_user "); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_error("query failed: %s\n", PQerrorMessage(conn)); pg_log_info("query was: %s\n", sql); PQclear(res); PQfinish(conn); exit(-1); } /* get the information */ is_superuser = strncmp(PQgetvalue(res, 0, 0), "t", 1) == 0; /* cleanup */ PQclear(res); return is_superuser; } /* * Compare given major and minor numbers to the one of the connected server */ bool backend_minimum_version(int major, int minor) { 
return opts->major > major || (opts->major == major && opts->minor >= minor); } /* * Check if backend has the pg_stat_statements view */ bool backend_has_pgstatstatements() { PGresult *res; char sql[1024]; bool has_pgstatstatements = false; /* get the oid and database name from the system pg_database table */ snprintf(sql, sizeof(sql), "SELECT n.nspname, " "has_schema_privilege(c.relnamespace, 'USAGE') AS schema_priv, " "has_table_privilege(c.oid, 'SELECT') AS view_priv " "FROM pg_class c " "JOIN pg_namespace n ON c.relnamespace=n.oid " "WHERE c.relname='pg_stat_statements' AND c.relkind='v'"); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_error("query failed: %s\n", PQerrorMessage(conn)); pg_log_info("query was: %s\n", sql); PQclear(res); PQfinish(conn); exit(-1); } /* get the information */ has_pgstatstatements = PQntuples(res)>0; /* if it's present, set search_path to access it */ if (has_pgstatstatements) { // check if user has rights to use schema if (!strcmp(PQgetvalue(res, 0, 1), "f")) { pg_log_warning("pg_stat_statements is available, but user has no right to use schema \"%s\"!", PQgetvalue(res, 0, 0)); has_pgstatstatements = false; } // check if user has rights to select view if (!strcmp(PQgetvalue(res, 0, 2), "f")) { pg_log_warning("pg_stat_statements is available, but user has no right to use view \"pg_stat_statements\"!"); has_pgstatstatements = false; } if (has_pgstatstatements) { snprintf(sql, sizeof(sql), "SET search_path TO %s", PQgetvalue(res, 0, 0)); /* cleanup */ PQclear(res); /* make the call to set search_path */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) { pg_log_error("query failed: %s\n", PQerrorMessage(conn)); pg_log_info("query was: %s\n", sql); PQclear(res); PQfinish(conn); exit(-1); } } } /* cleanup */ PQclear(res); return has_pgstatstatements; } int main(int argc, char **argv) { const char 
*progname; ConnParams cparams; bool is_superuser = false; /* Initialize the logging interface */ pg_logging_init(argv[0]); /* Get the program name */ progname = get_progname(argv[0]); opts = (struct options *) myalloc(sizeof(struct options)); /* parse the opts */ get_opts(argc, argv); if (opts->dbname == NULL) { opts->dbname = "postgres"; opts->nodb = true; } if (opts->directory == NULL) { opts->directory = "./"; } /* Set the connection struct */ cparams.pghost = opts->hostname; cparams.pgport = opts->port; cparams.pguser = opts->username; cparams.dbname = opts->dbname; cparams.prompt_password = TRI_DEFAULT; cparams.override_dbname = NULL; /* Connect to the database */ conn = connectDatabase(&cparams, progname, false, false, false); /* get version */ fetch_version(); /* check superuser attribute */ is_superuser = check_superuser(); /* grab cluster stats info */ sql_exec_dump_pgstatactivity(); if (backend_minimum_version(9, 4)) sql_exec_dump_pgstatarchiver(); if (backend_minimum_version(8, 3)) sql_exec_dump_pgstatbgwriter(); if (backend_minimum_version(17, 0)) sql_exec_dump_pgstatcheckpointer(); sql_exec_dump_pgstatdatabase(); if (backend_minimum_version(9, 1)) { sql_exec_dump_pgstatdatabaseconflicts(); sql_exec_dump_pgstatreplication(); } if (backend_minimum_version(14, 0)) sql_exec_dump_pgstatreplicationslots(); if (backend_minimum_version(13, 0)) sql_exec_dump_pgstatslru(); if (backend_minimum_version(10, 0)) sql_exec_dump_pgstatsubscription(); if (backend_minimum_version(14, 0)) { sql_exec_dump_pgstatwal(); sql_exec_dump_pgstatwalreceiver(); } /* grab database stats info */ sql_exec_dump_pgstatalltables(); sql_exec_dump_pgstatallindexes(); sql_exec_dump_pgstatioalltables(); sql_exec_dump_pgstatioallindexes(); sql_exec_dump_pgstatioallsequences(); if (backend_minimum_version(8, 4)) sql_exec_dump_pgstatuserfunctions(); /* grab progress stats info */ if (backend_minimum_version(13, 0)) sql_exec_dump_pgstatprogressanalyze(); if (backend_minimum_version(13, 0)) 
sql_exec_dump_pgstatprogressbasebackup(); if (backend_minimum_version(12, 0)) sql_exec_dump_pgstatprogresscluster(); if (backend_minimum_version(14, 0)) sql_exec_dump_pgstatprogresscopy(); if (backend_minimum_version(12, 0)) sql_exec_dump_pgstatprogresscreateindex(); if (backend_minimum_version(10, 0)) sql_exec_dump_pgstatprogressvacuum(); /* grab other informations */ sql_exec_dump_pgclass_size(); if (backend_has_pgstatstatements()) sql_exec_dump_pgstatstatements(); if (backend_minimum_version(8, 2) && is_superuser) sql_exec_dump_xlog_stat(); PQfinish(conn); return 0; } pgstats-REL1_4_0/pgdisplay.c000066400000000000000000000235341470250475400160650ustar00rootroot00000000000000/* * pgdisplay, a PostgreSQL app to display a table * in an informative way. * * This software is released under the PostgreSQL Licence. * * Guillaume Lelarge, guillaume@lelarge.info, 2015-2024. * * pgstat/pgdisplay.c */ /* * System headers */ #include #include /* * PostgreSQL headers */ #include "postgres_fe.h" #include "common/logging.h" #include "fe_utils/connect_utils.h" /* * Defines */ #define PGDISPLAY_VERSION "0.0.1" #define PGSTAT_DEFAULT_STRING_SIZE 1024 #define couleur(param) printf("\033[48;2;255;%d;%dm",param,param) #define nocouleur() printf("\033[0m") /* these are the options structure for command line parameters */ struct options { /* misc */ bool verbose; char *table; int groups; int blocksize; /* connection parameters */ char *dbname; char *hostname; char *port; char *username; /* version number */ int major; int minor; }; /* * Global variables */ PGconn *conn; struct options *opts; extern char *optarg; /* * Function prototypes */ static void help(const char *progname); void get_opts(int, char **); #ifndef FE_MEMUTILS_H void *pg_malloc(size_t size); char *pg_strdup(const char *in); #endif void display_fsm(char *table); void fetch_version(void); void fetch_blocksize(void); bool backend_minimum_version(int major, int minor); void allocate_struct(void); static void 
quit_properly(SIGNAL_ARGS); /* * Print help message */ static void help(const char *progname) { printf("%s displays table in an informative way.\n\n" "Usage:\n" " %s [OPTIONS] [delay [count]]\n" "\nGeneral options:\n" " -G GROUPS # of groups of blocks\n" " -t TABLE table to display\n" " -v verbose\n" " -?|--help show this help, then exit\n" " -V|--version output version information, then exit\n" "\nConnection options:\n" " -h HOSTNAME database server host or socket directory\n" " -p PORT database server port number\n" " -U USER connect as specified database user\n" " -d DBNAME database to connect to\n" "Report bugs to .\n", progname, progname); } /* * Parse command line options and check for some usage errors */ void get_opts(int argc, char **argv) { int c; const char *progname; progname = get_progname(argv[0]); /* set the defaults */ opts->verbose = false; opts->groups = 20; opts->blocksize = 0; opts->table = NULL; opts->dbname = NULL; opts->hostname = NULL; opts->port = NULL; opts->username = NULL; if (argc > 1) { if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) { help(progname); exit(0); } if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) { puts("pgdisplay " PGDISPLAY_VERSION " (compiled with PostgreSQL " PG_VERSION ")"); exit(0); } } /* get opts */ while ((c = getopt(argc, argv, "h:p:U:d:t:G:v")) != -1) { switch (c) { /* specify the # of groups */ case 'G': opts->groups = atoi(optarg); break; /* specify the table */ case 't': opts->table = pg_strdup(optarg); break; /* don't show headers */ case 'v': opts->verbose = true; break; /* specify the database */ case 'd': opts->dbname = pg_strdup(optarg); break; /* host to connect to */ case 'h': opts->hostname = pg_strdup(optarg); break; /* port to connect to on remote host */ case 'p': opts->port = pg_strdup(optarg); break; /* username */ case 'U': opts->username = pg_strdup(optarg); break; default: errx(1, "Try \"%s --help\" for more information.\n", progname); exit(EXIT_FAILURE); 
} } if (opts->table == NULL) { pg_log_error("missing table name\n"); exit(EXIT_FAILURE); } if (opts->dbname == NULL) { /* * We want to use dbname for possible error reports later, * and in case someone has set and is using PGDATABASE * in its environment preserve that name for later usage */ if (!getenv("PGDATABASE")) opts->dbname = "postgres"; else opts->dbname = getenv("PGDATABASE"); } } #ifndef FE_MEMUTILS_H /* * "Safe" wrapper around malloc(). */ void * pg_malloc(size_t size) { void *tmp; /* Avoid unportable behavior of malloc(0) */ if (size == 0) size = 1; tmp = malloc(size); if (!tmp) { pg_log_error("out of memory\n"); exit(EXIT_FAILURE); } return tmp; } /* * "Safe" wrapper around strdup(). */ char * pg_strdup(const char *in) { char *tmp; if (!in) { pg_log_error("cannot duplicate null pointer (internal error)\n"); exit(EXIT_FAILURE); } tmp = strdup(in); if (!tmp) { pg_log_error("out of memory\n"); exit(EXIT_FAILURE); } return tmp; } #endif /* * Dump all archiver stats. */ void display_fsm(char *table) { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row; int color; int totalspace, freespace; int groupby, blocksize; int n; blocksize = 8192; /* grab the stats (this is the only stats on one line) */ /* snprintf(sql, sizeof(sql), "with fsm as (select blkno/443 as blockrange, sum(avail) as available, 8192*443 as total from pg_freespace('%s') group by 1)" "select blockrange, available, total, 100*available/total as ratio, 180*available/total as color from fsm order by 1", table); */ snprintf(sql, sizeof(sql), "select avail from pg_freespace('%s') order by blkno", table); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgdisplay: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgdisplay: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* initialize some vars */ totalspace = nrows*blocksize; if (nrows <= 
opts->groups) groupby = 1; else groupby = nrows/opts->groups; freespace = 0; n = 0; printf("Pages #: %d\n", nrows); printf("Table size: %d\n", totalspace); printf("... group of %d\n", groupby); printf("\n\n"); /* for each row, dump the information */ for (row = 0; row < nrows; row++) { /* getting new values */ freespace += atol(PQgetvalue(res, row, 0)); if (++n >= groupby) { //printf("Free space [%d] : %d (on %d)\n", n, freespace, groupby*blocksize); /* printing the diff... * note that the first line will be the current value, rather than the diff */ color = 180*freespace/(8192*groupby); if (color<0) color = 0; couleur(color); printf(" "); nocouleur(); freespace = 0; n = 0; } } printf("\n\n"); /* cleanup */ PQclear(res); } /* * Fetch block size. */ void fetch_blocksize() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; /* get the cluster version */ snprintf(sql, sizeof(sql), "SELECT current_setting('block_size')"); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgdisplay: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgdisplay: query was: %s", sql); } /* get the only row, and parse it to get major and minor numbers */ opts->blocksize = atoi(PQgetvalue(res, 0, 0)); /* print version */ if (opts->verbose) printf("Detected block size: %d\n", opts->blocksize); /* cleanup */ PQclear(res); } /* * Fetch PostgreSQL major and minor numbers */ void fetch_version() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; /* get the cluster version */ snprintf(sql, sizeof(sql), "SELECT version()"); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgdisplay: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgdisplay: query was: %s", sql); } /* get the only row, and parse it to get major and minor numbers */ sscanf(PQgetvalue(res, 0, 0), "%*s %d.%d", 
&(opts->major), &(opts->minor)); /* print version */ if (opts->verbose) printf("Detected release: %d.%d\n", opts->major, opts->minor); /* cleanup */ PQclear(res); } /* * Compare given major and minor numbers to the one of the connected server */ bool backend_minimum_version(int major, int minor) { return opts->major > major || (opts->major == major && opts->minor >= minor); } /* * Close the PostgreSQL connection, and quit */ static void quit_properly(SIGNAL_ARGS) { PQfinish(conn); exit(EXIT_FAILURE); } /* * Main function */ int main(int argc, char **argv) { const char *progname; ConnParams cparams; /* * If the user stops the program (control-Z) and then resumes it, * print out the header again. */ pqsignal(SIGINT, quit_properly); /* Allocate the options struct */ opts = (struct options *) pg_malloc(sizeof(struct options)); /* Parse the options */ get_opts(argc, argv); /* Initialize the logging interface */ pg_logging_init(argv[0]); /* Get the program name */ progname = get_progname(argv[0]); /* Set the connection struct */ cparams.pghost = opts->hostname; cparams.pgport = opts->port; cparams.pguser = opts->username; cparams.dbname = opts->dbname; cparams.prompt_password = TRI_DEFAULT; cparams.override_dbname = NULL; /* Connect to the database */ conn = connectDatabase(&cparams, progname, false, false, false); // check last vacuum timestamp // fetch blocks count fetch_blocksize(); display_fsm(opts->table); PQfinish(conn); return 0; } pgstats-REL1_4_0/pgfe_cancel.c000066400000000000000000000126301470250475400163120ustar00rootroot00000000000000/*------------------------------------------------------------------------ * * Query cancellation support for frontend code * * Assorted utility functions to control query cancellation with signal * handler for SIGINT. 
* * * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/fe_utils/cancel.c * *------------------------------------------------------------------------ */ #include "postgres_fe.h" #include #include "common/connect.h" #include "fe_utils/cancel.h" #include "fe_utils/string_utils.h" /* * Write a simple string to stderr --- must be safe in a signal handler. * We ignore the write() result since there's not much we could do about it. * Certain compilers make that harder than it ought to be. */ #define write_stderr(str) \ do { \ const char *str_ = (str); \ int rc_; \ rc_ = write(fileno(stderr), str_, strlen(str_)); \ (void) rc_; \ } while (0) /* * Contains all the information needed to cancel a query issued from * a database connection to the backend. */ static PGcancel *volatile cancelConn = NULL; /* * Predetermined localized error strings --- needed to avoid trying * to call gettext() from a signal handler. */ static const char *cancel_sent_msg = NULL; static const char *cancel_not_sent_msg = NULL; /* * CancelRequested is set when we receive SIGINT (or local equivalent). * There is no provision in this module for resetting it; but applications * might choose to clear it after successfully recovering from a cancel. * Note that there is no guarantee that we successfully sent a Cancel request, * or that the request will have any effect if we did send it. */ volatile sig_atomic_t CancelRequested = false; #ifdef WIN32 static CRITICAL_SECTION cancelConnLock; #endif /* * Additional callback for cancellations. */ static void (*cancel_callback) (void) = NULL; /* * SetCancelConn * * Set cancelConn to point to the current database connection. 
*/ void SetCancelConn(PGconn *conn) { PGcancel *oldCancelConn; #ifdef WIN32 EnterCriticalSection(&cancelConnLock); #endif /* Free the old one if we have one */ oldCancelConn = cancelConn; /* be sure handle_sigint doesn't use pointer while freeing */ cancelConn = NULL; if (oldCancelConn != NULL) PQfreeCancel(oldCancelConn); cancelConn = PQgetCancel(conn); #ifdef WIN32 LeaveCriticalSection(&cancelConnLock); #endif } /* * ResetCancelConn * * Free the current cancel connection, if any, and set to NULL. */ void ResetCancelConn(void) { PGcancel *oldCancelConn; #ifdef WIN32 EnterCriticalSection(&cancelConnLock); #endif oldCancelConn = cancelConn; /* be sure handle_sigint doesn't use pointer while freeing */ cancelConn = NULL; if (oldCancelConn != NULL) PQfreeCancel(oldCancelConn); #ifdef WIN32 LeaveCriticalSection(&cancelConnLock); #endif } /* * Code to support query cancellation * * Note that sending the cancel directly from the signal handler is safe * because PQcancel() is written to make it so. We use write() to report * to stderr because it's better to use simple facilities in a signal * handler. * * On Windows, the signal canceling happens on a separate thread, because * that's how SetConsoleCtrlHandler works. The PQcancel function is safe * for this (unlike PQrequestCancel). However, a CRITICAL_SECTION is required * to protect the PGcancel structure against being changed while the signal * thread is using it. */ #ifndef WIN32 /* * handle_sigint * * Handle interrupt signals by canceling the current command, if cancelConn * is set. 
*/ static void handle_sigint(SIGNAL_ARGS) { int save_errno = errno; char errbuf[256]; CancelRequested = true; if (cancel_callback != NULL) cancel_callback(); /* Send QueryCancel if we are processing a database query */ if (cancelConn != NULL) { if (PQcancel(cancelConn, errbuf, sizeof(errbuf))) { write_stderr(cancel_sent_msg); } else { write_stderr(cancel_not_sent_msg); write_stderr(errbuf); } } errno = save_errno; /* just in case the write changed it */ } /* * setup_cancel_handler * * Register query cancellation callback for SIGINT. */ void setup_cancel_handler(void (*query_cancel_callback) (void)) { cancel_callback = query_cancel_callback; cancel_sent_msg = _("Cancel request sent\n"); cancel_not_sent_msg = _("Could not send cancel request: "); pqsignal(SIGINT, handle_sigint); } #else /* WIN32 */ static BOOL WINAPI consoleHandler(DWORD dwCtrlType) { char errbuf[256]; if (dwCtrlType == CTRL_C_EVENT || dwCtrlType == CTRL_BREAK_EVENT) { CancelRequested = true; if (cancel_callback != NULL) cancel_callback(); /* Send QueryCancel if we are processing a database query */ EnterCriticalSection(&cancelConnLock); if (cancelConn != NULL) { if (PQcancel(cancelConn, errbuf, sizeof(errbuf))) { write_stderr(cancel_sent_msg); } else { write_stderr(cancel_not_sent_msg); write_stderr(errbuf); } } LeaveCriticalSection(&cancelConnLock); return TRUE; } else /* Return FALSE for any signals not being handled */ return FALSE; } void setup_cancel_handler(void (*callback) (void)) { cancel_callback = callback; cancel_sent_msg = _("Cancel request sent\n"); cancel_not_sent_msg = _("Could not send cancel request: "); InitializeCriticalSection(&cancelConnLock); SetConsoleCtrlHandler(consoleHandler, TRUE); } #endif /* WIN32 */ pgstats-REL1_4_0/pgfe_connect_utils.c000066400000000000000000000107511470250475400177400ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * Facilities for frontend code to connect to and disconnect from databases. 
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/fe_utils/connect_utils.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres_fe.h"

#include "common/connect.h"
#include "common/logging.h"
#include "common/string.h"
#include "fe_utils/connect_utils.h"
#include "fe_utils/query_utils.h"

/*
 * Make a database connection with the given parameters.
 *
 * An interactive password prompt is automatically issued if needed and
 * allowed by cparams->prompt_password.
 *
 * If allow_password_reuse is true, we will try to re-use any password
 * given during previous calls to this routine.  (Callers should not pass
 * allow_password_reuse=true unless reconnecting to the same database+user
 * as before, else we might create password exposure hazards.)
 */
PGconn *
connectDatabase(const ConnParams *cparams, const char *progname,
				bool echo, bool fail_ok, bool allow_password_reuse)
{
	PGconn	   *conn;
	bool		new_pass;
	/* password persists across calls to support allow_password_reuse */
	static char *password = NULL;

	/* Callers must supply at least dbname; other params can be NULL */
	Assert(cparams->dbname);

	if (!allow_password_reuse && password)
	{
		free(password);
		password = NULL;
	}

	if (cparams->prompt_password == TRI_YES && password == NULL)
		password = simple_prompt("Password: ", false);

	/*
	 * Start the connection.  Loop until we have a password if requested by
	 * backend.
	 */
	do
	{
		/*
		 * 8 slots: host, port, user, password, dbname, override_dbname,
		 * fallback_application_name, and the terminating NULL.
		 */
		const char *keywords[8];
		const char *values[8];
		int			i = 0;

		/*
		 * If dbname is a connstring, its entries can override the other
		 * values obtained from cparams; but in turn, override_dbname can
		 * override the dbname component of it.
		 */
		keywords[i] = "host";
		values[i++] = cparams->pghost;
		keywords[i] = "port";
		values[i++] = cparams->pgport;
		keywords[i] = "user";
		values[i++] = cparams->pguser;
		keywords[i] = "password";
		values[i++] = password;
		keywords[i] = "dbname";
		values[i++] = cparams->dbname;
		if (cparams->override_dbname)
		{
			/* later "dbname" entry wins over the earlier one */
			keywords[i] = "dbname";
			values[i++] = cparams->override_dbname;
		}
		keywords[i] = "fallback_application_name";
		values[i++] = progname;
		keywords[i] = NULL;
		values[i++] = NULL;
		Assert(i <= lengthof(keywords));

		new_pass = false;
		conn = PQconnectdbParams(keywords, values, true);

		if (!conn)
			pg_fatal("could not connect to database %s: out of memory",
					 cparams->dbname);

		/*
		 * No luck?  Trying asking (again) for a password.
		 */
		if (PQstatus(conn) == CONNECTION_BAD &&
			PQconnectionNeedsPassword(conn) &&
			cparams->prompt_password != TRI_NO)
		{
			PQfinish(conn);
			free(password);
			password = simple_prompt("Password: ", false);
			new_pass = true;
		}
	} while (new_pass);

	/* check to see that the backend connection was successfully made */
	if (PQstatus(conn) == CONNECTION_BAD)
	{
		if (fail_ok)
		{
			PQfinish(conn);
			return NULL;
		}
		pg_fatal("%s", PQerrorMessage(conn));
	}

	/* Start strict; callers may override this. */
	PQclear(executeQuery(conn, ALWAYS_SECURE_SEARCH_PATH_SQL, echo));

	return conn;
}

/*
 * Try to connect to the appropriate maintenance database.
 *
 * This differs from connectDatabase only in that it has a rule for
 * inserting a default "dbname" if none was given (which is why cparams
 * is not const).  Note that cparams->dbname should typically come from
 * a --maintenance-db command line parameter.
 */
PGconn *
connectMaintenanceDatabase(ConnParams *cparams,
						   const char *progname, bool echo)
{
	PGconn	   *conn;

	/* If a maintenance database name was specified, just connect to it. */
	if (cparams->dbname)
		return connectDatabase(cparams, progname, echo, false, false);

	/* Otherwise, try postgres first and then template1.
	 */
	cparams->dbname = "postgres";
	conn = connectDatabase(cparams, progname, echo, true, false);
	if (!conn)
	{
		cparams->dbname = "template1";
		conn = connectDatabase(cparams, progname, echo, false, false);
	}
	return conn;
}

/*
 * Disconnect the given connection, canceling any statement if one is active.
 */
void
disconnectDatabase(PGconn *conn)
{
	char		errbuf[256];

	Assert(conn != NULL);

	if (PQtransactionStatus(conn) == PQTRANS_ACTIVE)
	{
		PGcancel   *cancel;

		if ((cancel = PQgetCancel(conn)))
		{
			/* best-effort cancel; errbuf result is deliberately ignored */
			(void) PQcancel(cancel, errbuf, sizeof(errbuf));
			PQfreeCancel(cancel);
		}
	}

	PQfinish(conn);
}
pgstats-REL1_4_0/pgfe_query_utils.c000066400000000000000000000034521470250475400174540ustar00rootroot00000000000000/*-------------------------------------------------------------------------
 *
 * Facilities for frontend code to query a databases.
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/fe_utils/query_utils.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres_fe.h"

#include "common/logging.h"
#include "fe_utils/cancel.h"
#include "fe_utils/query_utils.h"

/*
 * Run a query, return the results, exit program on failure.
 */
PGresult *
executeQuery(PGconn *conn, const char *query, bool echo)
{
	PGresult   *res;

	if (echo)
		printf("%s\n", query);

	res = PQexec(conn, query);
	if (!res ||
		PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		pg_log_error("query failed: %s", PQerrorMessage(conn));
		pg_log_error_detail("Query was: %s", query);
		PQfinish(conn);
		exit(1);
	}

	return res;
}

/*
 * As above for a SQL command (which returns nothing).
*/ void executeCommand(PGconn *conn, const char *query, bool echo) { PGresult *res; if (echo) printf("%s\n", query); res = PQexec(conn, query); if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) { pg_log_error("query failed: %s", PQerrorMessage(conn)); pg_log_error_detail("Query was: %s", query); PQfinish(conn); exit(1); } PQclear(res); } /* * As above for a SQL maintenance command (returns command success). * Command is executed with a cancel handler set, so Ctrl-C can * interrupt it. */ bool executeMaintenanceCommand(PGconn *conn, const char *query, bool echo) { PGresult *res; bool r; if (echo) printf("%s\n", query); SetCancelConn(conn); res = PQexec(conn, query); ResetCancelConn(); r = (res && PQresultStatus(res) == PGRES_COMMAND_OK); PQclear(res); return r; } pgstats-REL1_4_0/pgreport.c000066400000000000000000000250501470250475400157260ustar00rootroot00000000000000/* * pgreport, a PostgreSQL app to get lots of informations from PostgreSQL * metadata and statistics. * * This software is released under the PostgreSQL Licence. * * Guillaume Lelarge, guillaume@lelarge.info, 2020-2024. 
* * pgstats/pgreport.c */ /* * PostgreSQL headers */ #include "postgres_fe.h" #include "common/logging.h" #include "fe_utils/connect_utils.h" #include "libpq/pqsignal.h" /* * pgreport headers */ #include "pgreport_queries.h" /* * Defines */ #define PGREPORT_VERSION "1.4.0" #define PGREPORT_DEFAULT_LINES 20 #define PGREPORT_DEFAULT_STRING_SIZE 2048 /* * Structs */ /* these are the options structure for command line parameters */ struct options { /* misc */ char *script; bool verbose; /* version number */ int major; int minor; }; /* * Global variables */ struct options *opts; extern char *optarg; const char *progname; /* * Function prototypes */ static void help(); void get_opts(int, char **); #ifndef FE_MEMUTILS_H void *pg_malloc(size_t size); char *pg_strdup(const char *in); #endif bool backend_minimum_version(int major, int minor); void execute(char *query); void install_extension(char *extension); void fetch_version(void); void fetch_postmaster_reloadconftime(void); void fetch_postmaster_starttime(void); void fetch_table(char *label, char *query); void fetch_file(char *filename); void fetch_kernelconfig(char *cfg); void exec_command(char *cmd); static void quit_properly(SIGNAL_ARGS); /* * Print help message */ static void help() { printf("%s gets lots of informations from PostgreSQL metadata and statistics.\n\n" "Usage:\n" " %s [OPTIONS]\n" "\nGeneral options:\n" " -s VERSION generate SQL script for $VERSION release\n" " -v verbose\n" " -?|--help show this help, then exit\n" " -V|--version output version information, then exit\n" "Report bugs to .\n", progname, progname); } /* * Parse command line options and check for some usage errors */ void get_opts(int argc, char **argv) { int c; /* set the defaults */ opts->script = NULL; opts->verbose = false; /* we should deal quickly with help and version */ if (argc > 1) { if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) { help(); exit(0); } if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") 
== 0) { puts("pgreport " PGREPORT_VERSION " (compiled with PostgreSQL " PG_VERSION ")"); exit(0); } } /* get options */ while ((c = getopt(argc, argv, "vs:")) != -1) { switch (c) { /* get script */ case 's': opts->script = pg_strdup(optarg); sscanf(opts->script, "%d.%d", &(opts->major), &(opts->minor)); break; /* get verbose */ case 'v': opts->verbose = true; break; default: pg_log_error("Try \"%s --help\" for more information.\n", progname); exit(EXIT_FAILURE); } } if (opts->script == NULL) { opts->script = "17"; } } #ifndef FE_MEMUTILS_H /* * "Safe" wrapper around malloc(). */ void * pg_malloc(size_t size) { void *tmp; /* Avoid unportable behavior of malloc(0) */ if (size == 0) size = 1; tmp = malloc(size); if (!tmp) { pg_log_error("out of memory (pg_malloc)\n"); exit(EXIT_FAILURE); } return tmp; } /* * "Safe" wrapper around strdup(). */ char * pg_strdup(const char *in) { char *tmp; if (!in) { pg_log_error("cannot duplicate null pointer (internal error)\n"); exit(EXIT_FAILURE); } tmp = strdup(in); if (!tmp) { pg_log_error("out of memory (pg_strdup)\n"); exit(EXIT_FAILURE); } return tmp; } #endif /* * Compare given major and minor numbers to the one of the connected server */ bool backend_minimum_version(int major, int minor) { return opts->major > major || (opts->major == major && opts->minor >= minor); } /* * Execute query */ void execute(char *query) { printf("%s;\n", query); } /* * Install extension */ void install_extension(char *extension) { printf("CREATE EXTENSION IF NOT EXISTS %s;\n", extension); } /* * Fetch PostgreSQL major and minor numbers */ void fetch_version() { printf("\\echo PostgreSQL version\n"); printf("SELECT version();\n"); } /* * Fetch PostgreSQL reload configuration time */ void fetch_postmaster_reloadconftime() { printf("\\echo PostgreSQL reload conf time\n"); printf("SELECT pg_conf_load_time();\n"); } /* * Fetch PostgreSQL start time */ void fetch_postmaster_starttime() { printf("\\echo PostgreSQL start time\n"); printf("SELECT 
pg_postmaster_start_time();\n"); } /* * Handle query */ void fetch_table(char *label, char *query) { printf("\\echo %s\n",label); printf("%s;\n",query); } /* * Close the PostgreSQL connection, and quit */ static void quit_properly(SIGNAL_ARGS) { exit(EXIT_FAILURE); } /* * Main function */ int main(int argc, char **argv) { char sql[10240]; /* * If the user stops the program, * quit nicely. */ pqsignal(SIGINT, quit_properly); /* Initialize the logging interface */ pg_logging_init(argv[0]); /* Get the program name */ progname = get_progname(argv[0]); /* Allocate the options struct */ opts = (struct options *) pg_malloc(sizeof(struct options)); /* Parse the options */ get_opts(argc, argv); printf("\\echo =================================================================================\n"); printf("\\echo == pgreport SQL script for a %s release =========================================\n", opts->script); printf("\\echo =================================================================================\n"); printf("SET application_name to 'pgreport';\n"); /* Fetch version */ printf("\\echo # PostgreSQL Version\n\n"); fetch_version(); printf("\n"); /* Create schema, and set if as our search_path */ execute(CREATE_SCHEMA); execute(SET_SEARCHPATH); /* Install some extensions if they are not already there */ install_extension("pg_buffercache"); install_extension("pg_visibility"); /* Install some functions/views */ execute(CREATE_GETVALUE_FUNCTION_SQL); execute(CREATE_BLOATTABLE_VIEW_SQL); strcat(sql, CREATE_BLOATINDEX_VIEW_SQL_1); strcat(sql, CREATE_BLOATINDEX_VIEW_SQL_2); execute(sql); if (backend_minimum_version(10,0)) { execute(CREATE_ORPHANEDFILES_VIEW_SQL2); } else { execute(CREATE_ORPHANEDFILES_VIEW_SQL1); } /* Fetch postmaster start time */ printf("\\echo # PostgreSQL Start time\n\n"); fetch_postmaster_starttime(); printf("\n"); /* Fetch reload conf time */ printf("\\echo # PostgreSQL Reload conf time\n\n"); fetch_postmaster_reloadconftime(); printf("\n"); /* Fetch 
settings by various ways */ printf("\\echo # PostgreSQL Configuration\n\n"); fetch_table(SETTINGS_BY_SOURCEFILE_TITLE, SETTINGS_BY_SOURCEFILE_SQL); fetch_table(SETTINGS_NOTCONFIGFILE_NOTDEFAULTVALUE_TITLE, SETTINGS_NOTCONFIGFILE_NOTDEFAULTVALUE_SQL); if (backend_minimum_version(9,5)) { fetch_table(PGFILESETTINGS_TITLE, PGFILESETTINGS_SQL); } if (backend_minimum_version(10,0)) { fetch_table(PGHBAFILERULES_TITLE, PGHBAFILERULES_SQL); } if (backend_minimum_version(15,0)) { fetch_table(PGIDENTFILEMAPPINGS_TITLE, PGIDENTFILEMAPPINGS_SQL); } fetch_table(PGSETTINGS_TITLE, PGSETTINGS_SQL); /* Fetch global objects */ printf("\\echo # Global objects\n\n"); fetch_table(CLUSTER_HITRATIO_TITLE, CLUSTER_HITRATIO_SQL); fetch_table(CLUSTER_BUFFERSUSAGE_TITLE, CLUSTER_BUFFERSUSAGE_SQL); fetch_table(CLUSTER_BUFFERSUSAGEDIRTY_TITLE, CLUSTER_BUFFERSUSAGEDIRTY_SQL); fetch_table(DATABASES_TITLE, DATABASES_SQL); fetch_table(DATABASES_IN_CACHE_TITLE, DATABASES_IN_CACHE_SQL); fetch_table(TABLESPACES_TITLE, TABLESPACES_SQL); fetch_table(ROLES_TITLE, backend_minimum_version(9,5) ? 
ROLES_SQL_95min : ROLES_SQL_94max); fetch_table(USER_PASSWORDS_TITLE, USER_PASSWORDS_SQL); fetch_table(DATABASEUSER_CONFIG_TITLE, DATABASEUSER_CONFIG_SQL); /* Fetch local objects of the current database */ if (backend_minimum_version(9,3)) { printf("SELECT current_database() AS db \\gset"); printf("\\echo # Local objects in database :'db'\n\n"); } else { printf("\\echo # Local objects in current database\n\n"); } fetch_table(SCHEMAS_TITLE, SCHEMAS_SQL); fetch_table(NBRELS_IN_SCHEMA_TITLE, NBRELS_IN_SCHEMA_SQL); if (backend_minimum_version(11,0)) { fetch_table(NBFUNCSPROCS_IN_SCHEMA_TITLE, NBFUNCSPROCS_IN_SCHEMA_SQL); } else { fetch_table(NBFUNCS_IN_SCHEMA_TITLE, NBFUNCS_IN_SCHEMA_SQL); } fetch_table(HEAPTOAST_SIZE_TITLE, HEAPTOAST_SIZE_SQL); fetch_table(EXTENSIONS_TITLE, EXTENSIONS_SQL); fetch_table(EXTENSIONSTABLE_TITLE, EXTENSIONSTABLE_SQL); fetch_table(KINDS_SIZE_TITLE, KINDS_SIZE_SQL); fetch_table(DEPENDENCIES_TITLE, DEPENDENCIES_SQL); fetch_table(KINDS_IN_CACHE_TITLE, KINDS_IN_CACHE_SQL); fetch_table(AM_SIZE_TITLE, AM_SIZE_SQL); fetch_table(INDEXTYPE_TITLE, INDEXTYPE_SQL); fetch_table(INDEXONTEXT_TITLE, INDEXONTEXT_SQL); fetch_table(PERCENTUSEDINDEXES_TITLE, PERCENTUSEDINDEXES_SQL); fetch_table(UNUSEDINDEXES_TITLE, UNUSEDINDEXES_SQL); fetch_table(REDUNDANTINDEXES_TITLE, REDUNDANTINDEXES_SQL); fetch_table(ORPHANEDFILES_TITLE, ORPHANEDFILES_SQL); fetch_table(NBFUNCS_TITLE, NBFUNCS_SQL); if (backend_minimum_version(11,0)) { fetch_table(FUNCSPROCS_PER_SCHEMA_AND_KIND_TITLE, FUNCSPROCS_PER_SCHEMA_AND_KIND_SQL); } else { fetch_table(FUNCS_PER_SCHEMA_TITLE, FUNCS_PER_SCHEMA_SQL); } fetch_table(LOBJ_TITLE, LOBJ_SQL); fetch_table(LOBJ_STATS_TITLE, LOBJ_STATS_SQL); fetch_table(RELOPTIONS_TITLE, RELOPTIONS_SQL); fetch_table(NEEDVACUUM_TITLE, NEEDVACUUM_SQL); fetch_table(NEEDANALYZE_TITLE, NEEDANALYZE_SQL); fetch_table(MINAGE_TITLE, MINAGE_SQL); fetch_table(TOBEFROZEN_TABLES_TITLE, TOBEFROZEN_TABLES_SQL); fetch_table(BLOATOVERVIEW_TITLE, BLOATOVERVIEW_SQL); 
fetch_table(TOP20BLOAT_TABLES_TITLE, TOP20BLOAT_TABLES_SQL); fetch_table(TOP20BLOAT_INDEXES_TITLE, TOP20BLOAT_INDEXES_SQL); fetch_table(REPSLOTS_TITLE, REPSLOTS_SQL); if (backend_minimum_version(10,0)) { fetch_table(PUBLICATIONS_TITLE, PUBLICATIONS_SQL); fetch_table(SUBSCRIPTIONS_TITLE, SUBSCRIPTIONS_SQL); } /* fetch_table(TOP10QUERYIDS_TITLE, TOP10QUERYIDS_SQL); fetch_table(TOP10QUERIES_TITLE, TOP10QUERIES_SQL); */ /* * Uninstall all * Actually, it drops our schema, which should get rid of all our stuff */ execute(DROP_ALL); pg_free(opts); return 0; } pgstats-REL1_4_0/pgreport_queries.h000066400000000000000000000623001470250475400174670ustar00rootroot00000000000000#define SETTINGS_BY_SOURCEFILE_TITLE "Settings by source file" #define SETTINGS_BY_SOURCEFILE_SQL "SELECT source, sourcefile, count(*) AS nb FROM pg_settings GROUP BY 1, 2" #define SETTINGS_NOTCONFIGFILE_NOTDEFAULTVALUE_TITLE "Non default value and not config file settings" #define SETTINGS_NOTCONFIGFILE_NOTDEFAULTVALUE_SQL "SELECT source, name, setting, unit FROM pg_settings WHERE source NOT IN ('configuration file', 'default') OR (setting != boot_val) ORDER BY source, name" #define CLUSTER_HITRATIO_TITLE "Hit ratio" #define CLUSTER_HITRATIO_SQL "SELECT 'index hit rate' AS name, 100.*sum(idx_blks_hit) / nullif(sum(idx_blks_hit + idx_blks_read),0) AS ratio FROM pg_statio_user_indexes UNION ALL SELECT 'table hit rate' AS name, 100.*sum(heap_blks_hit) / nullif(sum(heap_blks_hit) + sum(heap_blks_read),0) AS ratio FROM pg_statio_user_tables" #define CLUSTER_BUFFERSUSAGE_TITLE "Buffers Usage" #define CLUSTER_BUFFERSUSAGE_SQL "SELECT usagecount, count(*) FROM pg_buffercache GROUP BY 1 ORDER BY 1" #define CLUSTER_BUFFERSUSAGEDIRTY_TITLE "Buffers Usage with dirty" #define CLUSTER_BUFFERSUSAGEDIRTY_SQL "SELECT usagecount, isdirty, count(*) FROM pg_buffercache GROUP BY 1,2 ORDER BY 1,2" #define DATABASES_TITLE "Databases" #define DATABASES_SQL "SELECT d.datname as \"Name\", pg_catalog.pg_get_userbyid(d.datdba) as 
\"Owner\", pg_catalog.pg_encoding_to_char(d.encoding) as \"Encoding\", d.datcollate as \"Collate\", d.datctype as \"Ctype\", pg_catalog.array_to_string(d.datacl, E'\n') AS \"Access privileges\", CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_size_pretty(pg_catalog.pg_database_size(d.datname)) ELSE 'No Access' END as \"Size\", t.spcname as \"Tablespace\", pg_catalog.shobj_description(d.oid, 'pg_database') as \"Description\" FROM pg_catalog.pg_database d JOIN pg_catalog.pg_tablespace t on d.dattablespace = t.oid ORDER BY 1" #define DATABASES_IN_CACHE_TITLE "Databases in cache" #define DATABASES_IN_CACHE_SQL "SELECT CASE WHEN datname IS NULL THEN '' ELSE datname END AS datname, pg_size_pretty(count(*)*8192) FROM pg_buffercache bc LEFT JOIN pg_database d ON d.oid=bc.reldatabase GROUP BY 1 ORDER BY count(*) DESC" #define TABLESPACES_TITLE "Tablespaces" #define TABLESPACES_SQL "SELECT spcname AS \"Name\", pg_catalog.pg_get_userbyid(spcowner) AS \"Owner\", pg_catalog.pg_tablespace_location(oid) AS \"Location\", pg_size_pretty(pg_tablespace_size(oid)) AS \"Size\", pg_catalog.array_to_string(spcacl, E'\n') AS \"Access privileges\", spcoptions AS \"Options\", pg_catalog.shobj_description(oid, 'pg_tablespace') AS \"Description\" FROM pg_catalog.pg_tablespace ORDER BY 1" #define ROLES_TITLE "Roles" #define ROLES_SQL_94max "SELECT r.rolname, r.rolsuper, r.rolinherit, r.rolcreaterole, r.rolcreatedb, r.rolcanlogin, r.rolconnlimit, r.rolvaliduntil, ARRAY(SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) as memberof, r.rolreplication FROM pg_catalog.pg_roles r WHERE r.rolname !~ '^pg_' ORDER BY 1" #define ROLES_SQL_95min "SELECT r.rolname, r.rolsuper, r.rolinherit, r.rolcreaterole, r.rolcreatedb, r.rolcanlogin, r.rolconnlimit, r.rolvaliduntil, ARRAY(SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) 
as memberof, r.rolreplication, r.rolbypassrls FROM pg_catalog.pg_roles r WHERE r.rolname !~ '^pg_' ORDER BY 1" #define USER_PASSWORDS_TITLE "User passwords" #define USER_PASSWORDS_SQL "SELECT usename, valuntil, CASE WHEN passwd IS NULL THEN '' else passwd END AS passwd FROM pg_catalog.pg_shadow ORDER BY 1" #define DATABASEUSER_CONFIG_TITLE "Databases and users specific configuration" #define DATABASEUSER_CONFIG_SQL "select datname, rolname, setconfig from pg_db_role_setting drs left join pg_database d on d.oid=drs.setdatabase left join pg_roles r on r.oid=drs.setrole" #define SCHEMAS_TITLE "Schemas" #define SCHEMAS_SQL "SELECT n.nspname AS \"Name\", pg_catalog.pg_get_userbyid(n.nspowner) AS \"Owner\" FROM pg_catalog.pg_namespace n WHERE n.nspname !~ '^pg_' AND n.nspname <> 'information_schema' ORDER BY 1" #define NBRELS_IN_SCHEMA_TITLE "Relations per kinds and schemas" #define NBRELS_IN_SCHEMA_SQL "select nspname, rolname, count(*) filter (where relkind='r') as tables, count(*) filter (where relkind='t') as toasts, count(*) filter (where relkind='i') as index, count(*) filter (where relkind='S') as sequences, count(*) filter (where relkind='v') as views, count(*) filter (where relkind='m') as matviews from pg_namespace n join pg_roles r on r.oid=n.nspowner left join pg_class c on n.oid=c.relnamespace group by nspname, rolname order by 1, 2" #define NBFUNCS_IN_SCHEMA_TITLE "Functions per schema" #define NBFUNCS_IN_SCHEMA_SQL "select nspname, rolname, count(*) filter (where p.oid is not null) as functions from pg_namespace n join pg_roles r on r.oid=n.nspowner left join pg_proc p on n.oid=p.pronamespace group by nspname, rolname order by 1, 2" #define NBFUNCSPROCS_IN_SCHEMA_TITLE "Routines per schema" #define NBFUNCSPROCS_IN_SCHEMA_SQL "select nspname, rolname, count(*) filter (where prokind='f') as functions, count(*) filter (where prokind='p') as procedures from pg_namespace n join pg_roles r on r.oid=n.nspowner left join pg_proc p on n.oid=p.pronamespace group by 
nspname, rolname order by 1, 2" #define HEAPTOAST_SIZE_TITLE "HEAP and TOAST sizes per schema" #define HEAPTOAST_SIZE_SQL "select nspname, relname, pg_relation_size(c.oid) as heap_size, pg_relation_size(reltoastrelid) as toast_size from pg_namespace n join pg_class c on n.oid=c.relnamespace where pg_relation_size(reltoastrelid)>0 order by nspname, relname" #define EXTENSIONS_TITLE "Extensions" #define EXTENSIONS_SQL "SELECT e.extname AS \"Name\", e.extversion AS \"Version\", n.nspname AS \"Schema\", c.description AS \"Description\" FROM pg_catalog.pg_extension e LEFT JOIN pg_catalog.pg_namespace n ON n.oid = e.extnamespace LEFT JOIN pg_catalog.pg_description c ON c.objoid = e.oid AND c.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass ORDER BY 1" #define EXTENSIONSTABLE_TITLE "Extensions Tables (dumpable or not?)" #define EXTENSIONSTABLE_SQL "WITH tables_dumped AS (SELECT e.extname, n.nspname||'.'||c.relname AS relation_name1 FROM pg_extension e, LATERAL unnest(extconfig) AS toid JOIN pg_class c on c.oid=toid JOIN pg_namespace n on n.oid=c.relnamespace) SELECT e.extname AS extension_name, relation_name2, tables_dumped.extname IS NOT NULL AS to_be_dumped FROM pg_catalog.pg_depend d JOIN pg_catalog.pg_extension e ON e.oid=d.refobjid, LATERAL pg_catalog.pg_describe_object(d.classid, d.objid, 0), LATERAL substr(pg_describe_object, case when pg_describe_object like 'table %' then length('table ') else length('sequence ') end + 1) AS relation_name2 LEFT JOIN tables_dumped ON tables_dumped.relation_name1=relation_name2 WHERE d.refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND d.deptype = 'e' AND (pg_describe_object like 'table %' OR pg_describe_object like 'sequence %') ORDER BY 1,2,3" #define KINDS_SIZE_TITLE "Number and size per relations kinds" #define KINDS_SIZE_SQL "SELECT nspname, relkind, count(*), pg_size_pretty(sum(pg_table_size(c.oid))) FROM pg_class c JOIN pg_namespace n ON n.oid=c.relnamespace GROUP BY 1,2 ORDER BY 1,2" #define 
DEPENDENCIES_TITLE "Dependencies" #define DEPENDENCIES_SQL "with etypes as ( select classid::regclass, objid, deptype, e.extname from pg_depend join pg_extension e on refclassid = 'pg_extension'::regclass and refobjid = e.oid where classid = 'pg_type'::regclass ) select etypes.extname, etypes.objid::regtype as type, n.nspname as schema, c.relname as table, attname as column from pg_depend join etypes on etypes.classid = pg_depend.refclassid and etypes.objid = pg_depend.refobjid join pg_class c on c.oid = pg_depend.objid join pg_namespace n on n.oid = c.relnamespace join pg_attribute attr on attr.attrelid = pg_depend.objid and attr.attnum = pg_depend.objsubid where pg_depend.classid = 'pg_class'::regclass" #define KINDS_IN_CACHE_TITLE "Relation kinds in cache" #define KINDS_IN_CACHE_SQL "select relkind, pg_size_pretty(count(*)*8192) from pg_buffercache bc left join pg_class c on c.relfilenode=bc.relfilenode group by 1 order by count(*) desc" #define AM_SIZE_TITLE "Access Methods" #define AM_SIZE_SQL "select nspname, amname, count(*), pg_size_pretty(sum(pg_table_size(c.oid))) from pg_class c join pg_am a on a.oid=c.relam join pg_namespace n on n.oid=c.relnamespace group by 1, 2 order by 1,2" #define INDEXTYPE_TITLE "Index by types" #define INDEXTYPE_SQL "SELECT nspname, count(*) FILTER (WHERE not indisunique AND not indisprimary) as standard, count(*) FILTER (WHERE indisunique AND not indisprimary) as unique, count(*) FILTER (WHERE indisprimary) as primary, count(*) FILTER (WHERE indisexclusion) as exclusion, count(*) FILTER (WHERE indisclustered) as clustered, count(*) FILTER (WHERE indisvalid) as valid FROM pg_index i JOIN pg_class c ON c.oid=i.indexrelid JOIN pg_namespace n ON n.oid=c.relnamespace GROUP BY 1;" #define INDEXONTEXT_TITLE "Index and opclass" #define INDEXONTEXT_SQL "WITH colind AS (SELECT i.indrelid AS oid, i.indrelid::regclass AS tbl, c.relname AS idx, unnest(i.indkey::int4[]) AS num, unnest(i.indclass::int4[]) AS class FROM pg_class c JOIN pg_am a 
ON a.oid = c.relam JOIN pg_index i ON i.indexrelid = c.oid WHERE c.relkind = 'i' AND c.relname NOT LIKE 'pg%' AND a.amname = 'btree') SELECT colind.tbl AS \"Table\", colind.idx AS \"Index\", a.attname AS \"Column\", t.typname AS \"Type\", oc.opcname AS \"Operator class\", oc.opcdefault AS \"Default?\" FROM colind JOIN pg_attribute a ON a.attrelid = colind.oid AND a.attnum = colind.num JOIN pg_type t ON t.oid = a.atttypid JOIN pg_opclass oc ON oc.oid = colind.class ORDER BY colind.tbl, colind.idx, colind.num" #define NBFUNCS_TITLE "User routines" #define NBFUNCS_SQL "select count(*) from pg_proc where pronamespace=2200 or pronamespace>16383" #define FUNCSPROCS_PER_SCHEMA_AND_KIND_TITLE "Routines per schema and kind" #define FUNCSPROCS_PER_SCHEMA_AND_KIND_SQL "select n.nspname, l.lanname, p.prokind, count(*) from pg_proc p join pg_namespace n on n.oid=p.pronamespace join pg_language l on l.oid=p.prolang where pronamespace=2200 or pronamespace>16383 group by 1, 2, 3 order by 1, 2, 3" #define FUNCS_PER_SCHEMA_TITLE "Functions per schema and language" #define FUNCS_PER_SCHEMA_SQL "select n.nspname, l.lanname, count(*) from pg_proc p join pg_namespace n on n.oid=p.pronamespace join pg_language l on l.oid=p.prolang where pronamespace=2200 or pronamespace>16383 group by 1, 2 order by 1, 2" #define LOBJ_TITLE "Large Objects" #define LOBJ_SQL "select count(*) from pg_largeobject" #define LOBJ_STATS_TITLE "Large Objects Size" #define LOBJ_STATS_SQL "select reltuples, relpages from pg_class where relname='pg_largeobject'" #define RELOPTIONS_TITLE "Relation Options" #define RELOPTIONS_SQL "select nspname, relkind, relname, reloptions from pg_class c join pg_namespace n on n.oid=c.relnamespace where reloptions is not null order by 1, 3, 2" #define TOBEFROZEN_TABLES_TITLE "Tables to be frozen" #define TOBEFROZEN_TABLES_SQL "select count(*) from pg_class where relkind='r' and age(relfrozenxid)>current_setting('autovacuum_freeze_max_age')::integer" #define PGFILESETTINGS_TITLE 
"pg_file_settings" #define PGFILESETTINGS_SQL "select * from pg_file_settings " #define PGHBAFILERULES_TITLE "pg_hba_file_rules" #define PGHBAFILERULES_SQL "select * from pg_hba_file_rules" #define PGIDENTFILEMAPPINGS_TITLE "pg_ident_file_mappings" #define PGIDENTFILEMAPPINGS_SQL "select * from pg_ident_file_mappings" #define PUBLICATIONS_TITLE "Publications" #define PUBLICATIONS_SQL "select * from pg_publication" #define REPSLOTS_TITLE "Replication slots" #define REPSLOTS_SQL "select * from pg_replication_slots" #define SUBSCRIPTIONS_TITLE "Subscriptions" #define SUBSCRIPTIONS_SQL "select * from pg_subscription" #define PGSETTINGS_TITLE "pg_settings" #define PGSETTINGS_SQL "select * from pg_settings" #define TOP10QUERYIDS_SQL "select queryid, calls, total_time, mean_time from pg_stat_statements order by total_time desc limit 10" #define TOP10QUERIES_SQL "select queryid, query from pg_stat_statements order by total_time desc limit 10" #define PERCENTUSEDINDEXES_TITLE "Percentage usage of indexes" #define PERCENTUSEDINDEXES_SQL "SELECT relname, CASE idx_scan WHEN 0 THEN 'Insufficient data' ELSE (100 * idx_scan / (seq_scan + idx_scan))::text END percent_of_times_index_used, n_live_tup rows_in_table FROM pg_stat_user_tables ORDER BY n_live_tup DESC" #define UNUSEDINDEXES_TITLE "Unused indexes" #define UNUSEDINDEXES_SQL "select schemaname, count(*) from pg_stat_user_indexes s join pg_index i using (indexrelid) where idx_scan=0 and (not indisunique AND not indisprimary) group by 1;" #define REDUNDANTINDEXES_TITLE "Redundant indexes" #define REDUNDANTINDEXES_SQL "SELECT pg_size_pretty(SUM(pg_relation_size(idx))::BIGINT) AS SIZE, string_agg(idx::text, ', ') AS indexes FROM ( SELECT indexrelid::regclass AS idx, (indrelid::text ||E'\n'|| indclass::text ||E'\n'|| indkey::text ||E'\n'||COALESCE(indexprs::text,'')||E'\n' || COALESCE(indpred::text,'')) AS KEY FROM pg_index) sub GROUP BY KEY HAVING COUNT(*)>1 ORDER BY SUM(pg_relation_size(idx)) DESC" #define MINAGE_TITLE "Min 
age" #define MINAGE_SQL "SELECT label, age FROM ( select 'Process #'||pid AS label, age(backend_xid) AS age from pg_stat_activity UNION select 'Process #'||pid, age(backend_xmin) from pg_stat_activity UNION select 'Prepared transaction '||gid, age(transaction) from pg_prepared_xacts UNION select 'Replication slot '||slot_name, age(xmin) from pg_replication_slots UNION select 'Replication slot '||slot_name, age(catalog_xmin) from pg_replication_slots) tmp UNION select 'Secondary '||client_addr, age(backend_xmin) FROM pg_stat_replication WHERE backend_xmin IS NOT NULL ORDER BY age DESC;" #define NEEDVACUUM_TITLE "Tables needing autoVACUUMs" #define NEEDVACUUM_SQL "SELECT st.schemaname || '.' || st.relname tablename, st.n_dead_tup dead_tup, round((get_value('autovacuum_vacuum_threshold', c.reloptions, c.relkind) + get_value('autovacuum_vacuum_scale_factor', c.reloptions, c.relkind ) * c.reltuples)::numeric,2) max_dead_tup, st.last_autovacuum, count(*) FILTER (WHERE NOT all_visible) AS tobevacuumed_blocks, count(*) AS total_blocks FROM pg_stat_all_tables st, pg_class c, LATERAL pg_visibility_map(st.relid) WHERE c.oid = st.relid AND c.relkind IN ('r','m','t') AND st.n_dead_tup>0 GROUP BY 1,2,3,4" #define NEEDANALYZE_TITLE "Tables needing autoANALYZEs" #define NEEDANALYZE_SQL "SELECT st.schemaname || '.' || st.relname tablename, st.n_mod_since_analyze mod_tup, get_value('autovacuum_analyze_threshold', c.reloptions, c.relkind) + get_value('autovacuum_analyze_scale_factor', c.reloptions, c.relkind) * c.reltuples max_mod_tup, st.last_autoanalyze FROM pg_stat_all_tables st, pg_class c WHERE c.oid = st.relid AND c.relkind IN ('r','m') AND st.n_mod_since_analyze>0" #define CREATE_GETVALUE_FUNCTION_SQL "CREATE FUNCTION get_value(param text, reloptions text[], relkind \"char\") RETURNS float AS $$ SELECT coalesce((SELECT option_value FROM pg_options_to_table(reloptions) WHERE option_name = CASE WHEN relkind = 't' THEN 'toast.' 
ELSE '' END || param), current_setting(param))::float; $$ LANGUAGE sql" #define CREATE_BLOATTABLE_VIEW_SQL "CREATE TEMPORARY VIEW bloat_table AS SELECT schemaname, tblname, bs*tblpages AS real_size, (tblpages-est_tblpages)*bs AS extra_size, CASE WHEN tblpages - est_tblpages > 0 THEN 100 * (tblpages - est_tblpages)/tblpages::float ELSE 0 END AS extra_ratio, fillfactor, CASE WHEN tblpages - est_tblpages_ff > 0 THEN (tblpages-est_tblpages_ff)*bs ELSE 0 END AS bloat_size, CASE WHEN tblpages - est_tblpages_ff > 0 THEN 100 * (tblpages - est_tblpages_ff)/tblpages::float ELSE 0 END AS bloat_ratio, is_na FROM ( SELECT ceil( reltuples / ( (bs-page_hdr)/tpl_size ) ) + ceil( toasttuples / 4 ) AS est_tblpages, ceil( reltuples / ( (bs-page_hdr)*fillfactor/(tpl_size*100) ) ) + ceil( toasttuples / 4 ) AS est_tblpages_ff, tblpages, fillfactor, bs, tblid, schemaname, tblname, heappages, toastpages, is_na FROM ( SELECT ( 4 + tpl_hdr_size + tpl_data_size + (2*ma) - CASE WHEN tpl_hdr_size%ma = 0 THEN ma ELSE tpl_hdr_size%ma END - CASE WHEN ceil(tpl_data_size)::int%ma = 0 THEN ma ELSE ceil(tpl_data_size)::int%ma END) AS tpl_size, bs - page_hdr AS size_per_block, (heappages + toastpages) AS tblpages, heappages, toastpages, reltuples, toasttuples, bs, page_hdr, tblid, schemaname, tblname, fillfactor, is_na FROM ( SELECT tbl.oid AS tblid, ns.nspname AS schemaname, tbl.relname AS tblname, tbl.reltuples, tbl.relpages AS heappages, coalesce(toast.relpages, 0) AS toastpages, coalesce(toast.reltuples, 0) AS toasttuples, coalesce(substring( array_to_string(tbl.reloptions, ' ') FROM 'fillfactor=([0-9]+)')::smallint, 100) AS fillfactor, current_setting('block_size')::numeric AS bs, CASE WHEN version()~'mingw32' OR version()~'64-bit|x86_64|ppc64|ia64|amd64' THEN 8 ELSE 4 END AS ma, 24 AS page_hdr, 23 + CASE WHEN MAX(coalesce(s.null_frac,0)) > 0 THEN ( 7 + count(s.attname) ) / 8 ELSE 0::int END + CASE WHEN bool_or(att.attname = 'oid' and att.attnum < 0) THEN 4 ELSE 0 END AS tpl_hdr_size, sum( 
(1-coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 0) ) AS tpl_data_size, bool_or(att.atttypid = 'pg_catalog.name'::regtype) OR sum(CASE WHEN att.attnum > 0 THEN 1 ELSE 0 END) <> count(s.attname) AS is_na FROM pg_attribute AS att JOIN pg_class AS tbl ON att.attrelid = tbl.oid JOIN pg_namespace AS ns ON ns.oid = tbl.relnamespace LEFT JOIN pg_stats AS s ON s.schemaname=ns.nspname AND s.tablename = tbl.relname AND s.inherited=false AND s.attname=att.attname LEFT JOIN pg_class AS toast ON tbl.reltoastrelid = toast.oid WHERE NOT att.attisdropped AND tbl.relkind in ('r','m') GROUP BY 1,2,3,4,5,6,7,8,9,10 ORDER BY 2,3) AS s) AS s2) AS s3" #define CREATE_BLOATINDEX_VIEW_SQL_1 "CREATE TEMPORARY VIEW bloat_index AS SELECT nspname AS schemaname, tblname, idxname, bs*(relpages)::bigint AS real_size, bs*(relpages-est_pages)::bigint AS extra_size, 100 * (relpages-est_pages)::float / relpages AS extra_ratio, fillfactor, CASE WHEN relpages > est_pages_ff THEN bs*(relpages-est_pages_ff) ELSE 0 END AS bloat_size, 100 * (relpages-est_pages_ff)::float / relpages AS bloat_ratio, is_na FROM ( SELECT coalesce(1 + ceil(reltuples/floor((bs-pageopqdata-pagehdr)/(4+nulldatahdrwidth)::float)), 0) AS est_pages, coalesce(1 + ceil(reltuples/floor((bs-pageopqdata-pagehdr)*fillfactor/(100*(4+nulldatahdrwidth)::float))), 0) AS est_pages_ff, bs, nspname, tblname, idxname, relpages, fillfactor, is_na FROM ( SELECT maxalign, bs, nspname, tblname, idxname, reltuples, relpages, idxoid, fillfactor, ( index_tuple_hdr_bm + maxalign - CASE WHEN index_tuple_hdr_bm%maxalign = 0 THEN maxalign ELSE index_tuple_hdr_bm%maxalign END + nulldatawidth + maxalign - CASE WHEN nulldatawidth = 0 THEN 0 WHEN nulldatawidth::integer%maxalign = 0 THEN maxalign ELSE nulldatawidth::integer%maxalign END)::numeric AS nulldatahdrwidth, pagehdr, pageopqdata, is_na FROM ( SELECT n.nspname, i.tblname, i.idxname, i.reltuples, i.relpages, i.idxoid, i.fillfactor, current_setting('block_size')::numeric AS bs, CASE WHEN version() ~ 
'mingw32' OR version() ~ '64-bit|x86_64|ppc64|ia64|amd64' THEN 8 ELSE 4 END AS maxalign, 24 AS pagehdr, 16 AS pageopqdata, CASE WHEN max(coalesce(s.null_frac,0)) = 0 THEN 2 ELSE 2 + (( 32 + 8 - 1 ) / 8) END AS index_tuple_hdr_bm, sum( (1-coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 1024)) AS nulldatawidth, max( CASE WHEN i.atttypid = 'pg_catalog.name'::regtype THEN 1 ELSE 0 END ) > 0 AS is_na FROM ( SELECT ct.relname AS tblname, ct.relnamespace, ic.idxname, ic.attpos, ic.indkey, ic.indkey[ic.attpos], ic.reltuples, ic.relpages, ic.tbloid, ic.idxoid, ic.fillfactor, coalesce(a1.attnum, a2.attnum) AS attnum, coalesce(a1.attname, a2.attname) AS attname, coalesce(a1.atttypid, a2.atttypid) AS atttypid, CASE WHEN a1.attnum IS NULL THEN ic.idxname ELSE ct.relname END AS attrelname FROM ( SELECT idxname, reltuples, relpages, tbloid, idxoid, fillfactor, indkey, pg_catalog.generate_series(1,indnatts) AS attpos " #define CREATE_BLOATINDEX_VIEW_SQL_2 "FROM ( SELECT ci.relname AS idxname, ci.reltuples, ci.relpages, i.indrelid AS tbloid, i.indexrelid AS idxoid, coalesce(substring( array_to_string(ci.reloptions, ' ') from 'fillfactor=([0-9]+)')::smallint, 90) AS fillfactor, i.indnatts, pg_catalog.string_to_array(pg_catalog.textin( pg_catalog.int2vectorout(i.indkey)),' ')::int[] AS indkey FROM pg_catalog.pg_index i JOIN pg_catalog.pg_class ci ON ci.oid = i.indexrelid WHERE ci.relam=(SELECT oid FROM pg_am WHERE amname = 'btree') AND ci.relpages > 0) AS idx_data) AS ic JOIN pg_catalog.pg_class ct ON ct.oid = ic.tbloid LEFT JOIN pg_catalog.pg_attribute a1 ON ic.indkey[ic.attpos] <> 0 AND a1.attrelid = ic.tbloid AND a1.attnum = ic.indkey[ic.attpos] LEFT JOIN pg_catalog.pg_attribute a2 ON ic.indkey[ic.attpos] = 0 AND a2.attrelid = ic.idxoid AND a2.attnum = ic.attpos) i JOIN pg_catalog.pg_namespace n ON n.oid = i.relnamespace JOIN pg_catalog.pg_stats s ON s.schemaname = n.nspname AND s.tablename = i.attrelname AND s.attname = i.attname GROUP BY 1,2,3,4,5,6,7,8,9,10,11) AS 
rows_data_stats) AS rows_hdr_pdg_stats) AS relation_stats" #define CREATE_ORPHANEDFILES_VIEW_SQL1 "CREATE TEMPORARY VIEW orphaned_files AS WITH ver AS ( select current_setting('server_version_num') pgversion, v::integer/10000||'.'||mod(v::integer,10000)/100 AS version FROM current_setting('server_version_num') v), tbl_paths AS ( SELECT tbs.oid AS tbs_oid, spcname, 'pg_tblspc/' || tbs.oid || '/' || (SELECT dir FROM pg_ls_dir('pg_tblspc/'||tbs.oid||'/',true,false) dir WHERE dir LIKE E'PG\\_'||ver.version||E'\\_%' ) as tbl_path FROM pg_tablespace tbs, ver WHERE tbs.spcname NOT IN ('pg_default','pg_global')), files AS ( SELECT d.oid AS database_oid, 0 AS tbs_oid, 'base/'||d.oid AS path, file_name AS file_name, substring(file_name from E'[0-9]+' ) AS base_name FROM pg_database d, pg_ls_dir('base/' || d.oid,true,false) AS file_name WHERE d.datname = current_database() UNION ALL SELECT d.oid, tbp.tbs_oid, tbl_path||'/'||d.oid, file_name, (substring(file_name from E'[0-9]+' )) AS base_name FROM pg_database d, tbl_paths tbp, pg_ls_dir(tbp.tbl_path||'/'|| d.oid,true,false) AS file_name WHERE d.datname = current_database()), orphans AS ( SELECT tbs_oid, base_name, file_name, current_setting('data_directory')||'/'||path||'/'||file_name as orphaned_file, pg_filenode_relation (tbs_oid,base_name::oid) as rel_without_pgclass FROM ver, files LEFT JOIN pg_class c ON (c.relfilenode::text=files.base_name OR (c.oid::text = files.base_name and c.relfilenode=0 and c.relname like 'pg_%')) WHERE c.oid IS null AND lower(file_name) NOT LIKE 'pg_%') SELECT orphaned_file, pg_size_pretty((pg_stat_file(orphaned_file)).size) as file_size, (pg_stat_file(orphaned_file)).modification as modification_date, current_database() FROM orphans WHERE rel_without_pgclass IS NULL" #define CREATE_ORPHANEDFILES_VIEW_SQL2 "CREATE TEMPORARY VIEW orphaned_files AS WITH ver AS ( select current_setting('server_version_num') pgversion, v::integer/10000 AS version FROM current_setting('server_version_num') v), 
tbl_paths AS ( SELECT tbs.oid AS tbs_oid, spcname, 'pg_tblspc/' || tbs.oid || '/' || (SELECT dir FROM pg_ls_dir('pg_tblspc/'||tbs.oid||'/',true,false) dir WHERE dir LIKE E'PG\\_'||ver.version||E'\\_%' ) as tbl_path FROM pg_tablespace tbs, ver WHERE tbs.spcname NOT IN ('pg_default','pg_global')), files AS ( SELECT d.oid AS database_oid, 0 AS tbs_oid, 'base/'||d.oid AS path, file_name AS file_name, substring(file_name from E'[0-9]+' ) AS base_name FROM pg_database d, pg_ls_dir('base/' || d.oid,true,false) AS file_name WHERE d.datname = current_database() UNION ALL SELECT d.oid, tbp.tbs_oid, tbl_path||'/'||d.oid, file_name, (substring(file_name from E'[0-9]+' )) AS base_name FROM pg_database d, tbl_paths tbp, pg_ls_dir(tbp.tbl_path||'/'|| d.oid,true,false) AS file_name WHERE d.datname = current_database()), orphans AS ( SELECT tbs_oid, base_name, file_name, current_setting('data_directory')||'/'||path||'/'||file_name as orphaned_file, pg_filenode_relation (tbs_oid,base_name::oid) as rel_without_pgclass FROM ver, files LEFT JOIN pg_class c ON (c.relfilenode::text=files.base_name OR (c.oid::text = files.base_name and c.relfilenode=0 and c.relname like 'pg_%')) WHERE c.oid IS null AND lower(file_name) NOT LIKE 'pg_%') SELECT orphaned_file, pg_size_pretty((pg_stat_file(orphaned_file)).size) as file_size, (pg_stat_file(orphaned_file)).modification as modification_date, current_database() FROM orphans WHERE rel_without_pgclass IS NULL" #define BLOATOVERVIEW_TITLE "Bloat Overview" #define BLOATOVERVIEW_SQL "SELECT 'Tables'' bloat' AS label, pg_size_pretty(sum(bloat_size)::numeric) AS bloat_size FROM bloat_table UNION SELECT 'Indexes'' bloat', pg_size_pretty(sum(bloat_size)::numeric) FROM bloat_index" #define TOP20BLOAT_TABLES_TITLE "Top 20 most fragmented tables (over 1MB)" #define TOP20BLOAT_TABLES_SQL "SELECT * FROM bloat_table WHERE bloat_size>1e6 ORDER BY bloat_size DESC LIMIT 20" #define TOP20BLOAT_INDEXES_TITLE "Top 20 most fragmented indexes (over 1MB)" #define 
TOP20BLOAT_INDEXES_SQL "SELECT * FROM bloat_index WHERE bloat_size>1e6 ORDER BY bloat_size DESC LIMIT 20" #define ORPHANEDFILES_TITLE "Orphaned files" #define ORPHANEDFILES_SQL "SELECT * FROM orphaned_files ORDER BY file_size DESC" #define CREATE_SCHEMA "CREATE SCHEMA pgreport" #define SET_SEARCHPATH "SET search_path TO pgreport" #define DROP_ALL "DROP FUNCTION get_value(text, text[], \"char\");DROP EXTENSION pg_buffercache;DROP EXTENSION pg_visibility;DROP SCHEMA pgreport" pgstats-REL1_4_0/pgstat.c000066400000000000000000004606441470250475400154020ustar00rootroot00000000000000/* * pgstat, a PostgreSQL app to gather statistical informations * from a PostgreSQL database, and act like a vmstat tool. * * This software is released under the PostgreSQL Licence. * * Guillaume Lelarge, guillaume@lelarge.info, 2014-2024. * * pgstats/pgstat.c */ /* * System headers */ #include /* * PostgreSQL headers */ #include "postgres_fe.h" #include "common/logging.h" #include "fe_utils/connect_utils.h" #include "libpq/pqsignal.h" /* * Defines */ #define PGSTAT_VERSION "1.4.0" #define PGSTAT_DEFAULT_LINES 20 #define PGSTAT_DEFAULT_STRING_SIZE 1024 #define PGSTAT_OLDEST_STAT_RESET "0001-01-01" #define half_rounded(x) (((x) + ((x) < 0 ? 
-1 : 1)) / 2) /* * Structs and enums */ /* units enum */ typedef enum { NO_UNIT = 0, ALL_UNIT, SIZE_UNIT } unit_t; /* stats enum */ typedef enum { NONE = 0, ARCHIVER, BGWRITER, BUFFERCACHE, CHECKPOINTER, CONNECTION, DATABASE, TABLE, TABLEIO, INDEX, FUNCTION, STATEMENT, SLRU, XLOG, DEADLIVE, TEMPFILE, REPSLOTS, WAITEVENT, WAL, PROGRESS_ANALYZE, PROGRESS_BASEBACKUP, PROGRESS_CLUSTER, PROGRESS_COPY, PROGRESS_CREATEINDEX, PROGRESS_VACUUM, PBPOOLS, PBSTATS } stat_t; /* these are the options structure for command line parameters */ struct options { /* misc */ bool verbose; bool dontredisplayheader; stat_t stat; char *substat; char *filter; bool human_readable; /* connection parameters */ char *dbname; char *hostname; char *port; char *username; /* version number */ int major; int minor; /* extension namespace (pg_stat_statements or pg_buffercache) */ char *namespace; /* frequency */ int interval; int count; }; /* structs for pretty printing */ struct size_pretty_unit { const char *name; long limit; bool round; long unitbits; }; struct nosize_pretty_unit { const char *name; long limit; bool round; long divider; }; /* pg_stat_archiver struct */ struct pgstatarchiver { long archived_count; /* we don't put these columns here because it makes no sense to get a diff between the new and the old values ? last_archived_wal; ? last_archived_time; */ long failed_count; /* we don't put these columns here because it makes no sense to get a diff between the new and the old values ? last_failed_wal; ? 
last_failed_time; */ char *stats_reset; }; /* pg_stat_bgwriter struct */ struct pgstatbgwriter { long buffers_clean; long maxwritten_clean; long buffers_alloc; char *stats_reset; }; /* pg_stat_checkpointer struct */ struct pgstatcheckpointer { long checkpoints_timed; /* real name is num_timed */ long checkpoints_requested; /* real name is num_requested */ long restartpoints_timed; long restartpoints_requested; /* real name is restartpoints_req */ long restartpoints_done; long write_time; long sync_time; long buffers_written; char *stats_reset; }; /* pg_stat_database struct */ struct pgstatdatabase { /* we don't put numbackends here because it makes no sense to get a diff between the new and the old values long numbackends; */ long xact_commit; long xact_rollback; long blks_read; long blks_hit; long tup_returned; long tup_fetched; long tup_inserted; long tup_updated; long tup_deleted; long conflicts; long temp_files; long temp_bytes; long deadlocks; long checksum_failures; /* checksum_last_failure */ float blk_read_time; float blk_write_time; float session_time; float active_time; float idle_in_transaction_time; long sessions; long sessions_abandoned; long sessions_fatal; long sessions_killed; char *stats_reset; }; /* pg_stat_all_tables struct */ struct pgstattable { long seq_scan; /* we don't put the timestamps here because it makes no sense to get a diff between the new and the old values ? last_seq_scan; */ long seq_tup_read; long idx_scan; /* we don't put the timestamps here because it makes no sense to get a diff between the new and the old values ? last_idx_scan; */ long idx_tup_fetch; long n_tup_ins; long n_tup_upd; long n_tup_del; long n_tup_hot_upd; long n_tup_newpage_upd; long n_live_tup; long n_dead_tup; long n_mod_since_analyze; long n_ins_since_vacuum; /* we don't put the timestamps here because it makes no sense to get a diff between the new and the old values ? last_vacuum; ? last_autovacuum; ? last_analyze; ? 
last_autoanalyze; */ long vacuum_count; long autovacuum_count; long analyze_count; long autoanalyze_count; }; /* pg_statio_all_tables struct */ struct pgstattableio { long heap_blks_read; long heap_blks_hit; long idx_blks_read; long idx_blks_hit; long toast_blks_read; long toast_blks_hit; long tidx_blks_read; long tidx_blks_hit; }; /* pg_stat_all_indexes struct */ struct pgstatindex { long idx_scan; /* we don't put the timestamps here because it makes no sense to get a diff between the new and the old values ? last_idx_scan; */ long idx_tup_read; long idx_tup_fetch; }; /* pg_stat_user_functions struct */ struct pgstatfunction { long calls; float total_time; float self_time; }; /* pg_stat_statements struct */ struct pgstatstatement { /* long userid; long dbid; long queryid; text query; */ long plans; float total_plan_time; /* float min_plan_time; float max_plan_time; float mean_plan_time; float stddev_plan_time; */ long calls; float total_exec_time; /* float min_exec_time; float max_exec_time; float mean_exec_time; float stddev_exec_time; */ long rows; long shared_blks_hit; long shared_blks_read; long shared_blks_dirtied; long shared_blks_written; long local_blks_hit; long local_blks_read; long local_blks_dirtied; long local_blks_written; long temp_blks_read; long temp_blks_written; float shared_blk_read_time; /* 9.2 - 16, blk_read_time */ float shared_blk_write_time; /* 9.2 - 16, blk_write_time */ float local_blk_read_time; /* 9.2 - 16, blk_read_time */ float local_blk_write_time; /* 9.2 - 16, blk_write_time */ float temp_blk_read_time; /* 9.2 - 15, blk_read_time */ float temp_blk_write_time; /* 9.2 - 15, blk_write_time */ long wal_records; long wal_fpi; long wal_bytes; long jit_functions; float jit_generation_time; long jit_inlining_count; float jit_inlining_time; long jit_optimization_count; float jit_optimization_time; long jit_emission_count; float jit_emission_time; long jit_deform_count; float jit_deform_time; char *stats_since; char *minmax_stats_since; }; 
/* pg_stat_slru struct */ struct pgstatslru { long blks_zeroed; long blks_hit; long blks_read; long blks_written; long blks_exists; long flushes; long truncates; char *stats_reset; }; /* pg_stat_wal struct */ struct pgstatwal { long wal_records; long wal_fpi; long wal_bytes; long wal_buffers_full; long wal_write; long wal_sync; float wal_write_time; float wal_sync_time; char *stats_reset; }; /* deadlivestats struct */ struct deadlivestats { long live; long dead; }; /* repslots struct */ /* TODO : there is a lot of other informations, might want to check them */ struct repslots { char *currentlocation; char *restartlsn; long restartlsndiff; }; /* xlogstats struct */ struct xlogstats { char *location; long locationdiff; }; /* pgBouncer stats struct */ struct pgbouncerstats { long total_request; long total_received; long total_sent; long total_query_time; /* not used yet float avg_req; float avg_recv; float avg_sent; float avg_query; */ }; /* * Global variables */ PGconn *conn; struct options *opts; extern char *optarg; struct pgstatarchiver *previous_pgstatarchiver; struct pgstatbgwriter *previous_pgstatbgwriter; struct pgstatcheckpointer *previous_pgstatcheckpointer; struct pgstatdatabase *previous_pgstatdatabase; struct pgstattable *previous_pgstattable; struct pgstattableio *previous_pgstattableio; struct pgstatindex *previous_pgstatindex; struct pgstatfunction *previous_pgstatfunction; struct pgstatstatement *previous_pgstatstatement; struct pgstatslru *previous_pgstatslru; struct pgstatwal *previous_pgstatwal; struct xlogstats *previous_xlogstats; struct deadlivestats *previous_deadlivestats; struct repslots *previous_repslots; struct pgbouncerstats *previous_pgbouncerstats; int hdrcnt = 0; volatile sig_atomic_t wresized; static int winlines = PGSTAT_DEFAULT_LINES; static const struct size_pretty_unit size_pretty_units[] = { {" b", 10 * 1024, false, 0}, {"kB", 20 * 1024 - 1, true, 10}, {"MB", 20 * 1024 - 1, true, 20}, {"GB", 20 * 1024 - 1, true, 30}, {"TB", 20 * 
1024 - 1, true, 40}, {"PB", 20 * 1024 - 1, true, 50}, {NULL, 0, false, 0} }; static const struct nosize_pretty_unit nosize_pretty_units[] = { {" ", 10 * 1000, false, 1000}, {"k", 20 * 1000 - 1, true, 1000}, {"M", 20 * 1000 - 1, true, 1000}, {"G", 20 * 1000 - 1, true, 1000}, {"T", 20 * 1000 - 1, true, 1000}, {"P", 20 * 1000 - 1, true, 1000}, {NULL, 0, false, 0} }; /* * Function prototypes */ static void help(const char *progname); void get_opts(int, char **); #ifndef FE_MEMUTILS_H void *pg_malloc(size_t size); char *pg_strdup(const char *in); #endif char *pg_size_pretty(long long size); char *pg_nosize_pretty(long long size); void format(char *r, long long value, long length, unit_t SIZE_UNIT); void format_time(char *r, float value, long length); void print_pgstatarchiver(void); void print_pgstatbgwriter(void); void print_pgstatcheckpointer(void); void print_pgstatconnection(void); void print_pgstatdatabase(void); void print_pgstattable(void); void print_pgstattableio(void); void print_pgstatindex(void); void print_pgstatfunction(void); void print_pgstatstatement(void); void print_pgstatslru(void); void print_pgstatwal(void); void print_pgstatprogressanalyze(void); void print_pgstatprogressbasebackup(void); void print_pgstatprogresscluster(void); void print_pgstatprogresscopy(void); void print_pgstatprogresscreateindex(void); void print_pgstatprogressvacuum(void); void print_pgstatwaitevent(void); void print_buffercache(void); void print_deadlivestats(void); void print_repslotsstats(void); void print_tempfilestats(void); void print_xlogstats(void); void print_pgbouncerpools(void); void print_pgbouncerstats(void); void fetch_version(void); char *fetch_setting(char *name); void fetch_pgbuffercache_namespace(void); void fetch_pgstatstatements_namespace(void); bool backend_minimum_version(int major, int minor); void print_header(void); void print_line(void); void allocate_struct(void); static void needhdr(int dummy); static void needresize(int); void doresize(void); 
static void quit_properly(SIGNAL_ARGS); /* * Print help message */ static void help(const char *progname) { printf("%s gathers statistics from a PostgreSQL database.\n\n" "Usage:\n" " %s [OPTIONS] [delay [count]]\n" "\nGeneral options:\n" " -f FILTER include only this object\n" " (only works for database, table, tableio,\n" " index, function, statement statistics,\n" " replication slots, and slru)\n" " -H display human-readable values\n" " -n do not redisplay header\n" " -s STAT stats to collect\n" " -S SUBSTAT part of stats to display\n" " (only works for database and statement)\n" " -v verbose\n" " -?|--help show this help, then exit\n" " -V|--version output version information, then exit\n" "\nConnection options:\n" " -h HOSTNAME database server host or socket directory\n" " -p PORT database server port number\n" " -U USER connect as specified database user\n" " -d DBNAME database to connect to\n" "\nThe default stat is pg_stat_bgwriter, but you can change it with\n" "the -s command line option, and one of its value (STAT):\n" " * archiver for pg_stat_archiver (only for 9.4+)\n" " * bgwriter for pg_stat_bgwriter\n" " * buffercache for pg_buffercache (needs the extension)\n" " * checkpointer for pg_stat_bgwriter (<17) or\n" " for pg_stat_checkpointer (17+)\n" " * connection (only for 9.2+)\n" " * database for pg_stat_database\n" " * table for pg_stat_all_tables\n" " * tableio for pg_statio_all_tables\n" " * index for pg_stat_all_indexes\n" " * function for pg_stat_user_function\n" " * statement for pg_stat_statements (needs the extension)\n" " * slru for pg_stat_slru (only for 13+)\n" " * xlog for xlog writes (only for 9.2+)\n" " * deadlive for dead/live tuples stats\n" " * repslots for replication slots\n" " * tempfile for temporary file usage\n" " * waitevent for wait events usage\n" " * wal for pg_stat_wal (only for 14+)\n" " * progress_analyze for analyze progress monitoring (only for\n" " 13+)\n" " * progress_basebackup for base backup progress monitoring 
(only\n" " for 13+)\n" " * progress_cluster for cluster progress monitoring (only for\n" " 12+)\n" " * progress_copy for copy progress monitoring (only for\n" " 14+)\n" " * progress_createindex for create index progress monitoring (only\n" " for 12+)\n" " * progress_vacuum for vacuum progress monitoring (only for\n" " 9.6+)\n" " * pbpools for pgBouncer pools statistics\n" " * pbstats for pgBouncer statistics\n\n" "Report bugs to .\n", progname, progname); } /* * Parse command line options and check for some usage errors */ void get_opts(int argc, char **argv) { int c; const char *progname; progname = get_progname(argv[0]); /* set the defaults */ opts->verbose = false; opts->dontredisplayheader = false; opts->stat = NONE; opts->substat = NULL; opts->filter = NULL; opts->human_readable = false; opts->dbname = NULL; opts->hostname = NULL; opts->port = NULL; opts->username = NULL; opts->namespace = NULL; opts->interval = 1; opts->count = -1; if (argc > 1) { if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) { help(progname); exit(0); } if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) { puts("pgstats " PGSTAT_VERSION " (compiled with PostgreSQL " PG_VERSION ")"); exit(0); } } /* get opts */ while ((c = getopt(argc, argv, "h:Hp:U:d:f:ns:S:v")) != -1) { switch (c) { /* specify the database */ case 'd': opts->dbname = pg_strdup(optarg); break; /* specify the filter */ case 'f': opts->filter = pg_strdup(optarg); break; /* do not redisplay the header */ case 'n': opts->dontredisplayheader = true; break; /* don't show headers */ case 'v': opts->verbose = true; break; /* specify the stat */ case 's': if (opts->stat != NONE) { pg_log_error("You can only use once the -s command line switch.\n"); exit(EXIT_FAILURE); } if (!strcmp(optarg, "archiver")) { opts->stat = ARCHIVER; } else if (!strcmp(optarg, "bgwriter")) { opts->stat = BGWRITER; } else if (!strcmp(optarg, "buffercache")) { opts->stat = BUFFERCACHE; } else if (!strcmp(optarg, 
"checkpointer")) { opts->stat = CHECKPOINTER; } else if (!strcmp(optarg, "connection")) { opts->stat = CONNECTION; } else if (!strcmp(optarg, "database")) { opts->stat = DATABASE; } else if (!strcmp(optarg, "table")) { opts->stat = TABLE; } else if (!strcmp(optarg, "tableio")) { opts->stat = TABLEIO; } else if (!strcmp(optarg, "index")) { opts->stat = INDEX; } else if (!strcmp(optarg, "function")) { opts->stat = FUNCTION; } else if (!strcmp(optarg, "statement")) { opts->stat = STATEMENT; } else if (!strcmp(optarg, "slru")) { opts->stat = SLRU; } else if (!strcmp(optarg, "wal")) { opts->stat = WAL; } else if (!strcmp(optarg, "xlog")) { opts->stat = XLOG; } else if (!strcmp(optarg, "deadlive")) { opts->stat = DEADLIVE; } else if (!strcmp(optarg, "repslots")) { opts->stat = REPSLOTS; } else if (!strcmp(optarg, "tempfile")) { opts->stat = TEMPFILE; } else if (!strcmp(optarg, "waitevent")) { opts->stat = WAITEVENT; } else if (!strcmp(optarg, "progress_analyze")) { opts->stat = PROGRESS_ANALYZE; } else if (!strcmp(optarg, "progress_basebackup")) { opts->stat = PROGRESS_BASEBACKUP; } else if (!strcmp(optarg, "progress_cluster")) { opts->stat = PROGRESS_CLUSTER; } else if (!strcmp(optarg, "progress_copy")) { opts->stat = PROGRESS_COPY; } else if (!strcmp(optarg, "progress_createindex")) { opts->stat = PROGRESS_CREATEINDEX; } else if (!strcmp(optarg, "progress_vacuum")) { opts->stat = PROGRESS_VACUUM; } else if (!strcmp(optarg, "pbpools")) { opts->stat = PBPOOLS; } else if (!strcmp(optarg, "pbstats")) { opts->stat = PBSTATS; } else { pg_log_error("Unknown service \"%s\".\n", optarg); pg_log_info("Try \"%s --help\" for more information.\n", progname); exit(EXIT_FAILURE); } break; /* specify the substat */ case 'S': opts->substat = pg_strdup(optarg); break; /* host to connect to */ case 'h': opts->hostname = pg_strdup(optarg); break; /* display human-readable values */ case 'H': opts->human_readable = true; break; /* port to connect to on remote host */ case 'p': opts->port = 
pg_strdup(optarg);
				break;

				/* username */
			case 'U':
				opts->username = pg_strdup(optarg);
				break;

			default:
				pg_log_error("Try \"%s --help\" for more information.\n", progname);
				exit(EXIT_FAILURE);
		}
	}

	/* first optional positional argument: polling interval in seconds */
	if (optind < argc)
	{
		opts->interval = atoi(argv[optind]);
		/*
		 * NOTE(review): atoi() also returns 0 for non-numeric input, so any
		 * unparsable value is rejected as an invalid delay — presumably
		 * intended; strtol() would allow distinguishing "0" from garbage.
		 */
		if (opts->interval == 0)
		{
			pg_log_error("Invalid delay.\n");
			pg_log_info("Try \"%s --help\" for more information.\n", progname);
			exit(EXIT_FAILURE);
		}
		optind++;
	}

	/* second optional positional argument: number of samples to print */
	if (optind < argc)
	{
		opts->count = atoi(argv[optind]);
		if (opts -> count == 0)
		{
			pg_log_error("Invalid count.\n");
			pg_log_info("Try \"%s --help\" for more information.\n", progname);
			exit(EXIT_FAILURE);
		}
	}

	/* pgBouncer statistics are only available on its virtual database */
	if (opts->stat == PBPOOLS || opts->stat == PBSTATS)
	{
		/*
		 * Set (or override) database name.
		 * It should always be pgbouncer
		 */
		opts->dbname = pg_strdup("pgbouncer");
	}

	if (opts->dbname == NULL)
	{
		/*
		 * We want to use dbname for possible error reports later,
		 * and in case someone has set and is using PGDATABASE
		 * in its environment preserve that name for later usage
		 */
		if (!getenv("PGDATABASE"))
			opts->dbname = "postgres";
		else
			opts->dbname = getenv("PGDATABASE");
	}
}

#ifndef FE_MEMUTILS_H
/*
 * "Safe" wrapper around malloc().
 *
 * Exits the program on out-of-memory instead of returning NULL, so callers
 * never need to check the result.  A zero-byte request is bumped to one byte
 * because malloc(0) may legally return NULL on some platforms.
 */
void *
pg_malloc(size_t size)
{
	void	   *tmp;

	/* Avoid unportable behavior of malloc(0) */
	if (size == 0)
		size = 1;
	tmp = malloc(size);
	if (!tmp)
	{
		pg_log_error("out of memory (pg_malloc)\n");
		exit(EXIT_FAILURE);
	}
	return tmp;
}

/*
 * "Safe" wrapper around strdup().
 *
 * Exits on NULL input or out-of-memory; the returned copy is owned by the
 * caller.
 */
char *
pg_strdup(const char *in)
{
	char	   *tmp;

	if (!in)
	{
		pg_log_error("cannot duplicate null pointer (internal error)\n");
		exit(EXIT_FAILURE);
	}
	tmp = strdup(in);
	if (!tmp)
	{
		pg_log_error("out of memory (pg_strdup)\n");
		exit(EXIT_FAILURE);
	}
	return tmp;
}
#endif

/*
 * Display metrics with a size unit
 *
 * Returns a freshly allocated "value unit" string; the caller owns it.
 * NOTE(review): the bare malloc() result is not checked for NULL here,
 * unlike pg_malloc() above — confirm whether that is intentional.
 */
char *pg_size_pretty(long long size)
{
	char	   *buf;
	const struct size_pretty_unit *SIZE_UNIT;

	buf = malloc( sizeof(char) * (64+1));

	/* walk the unit table until the value fits the current unit */
	for (SIZE_UNIT = size_pretty_units; SIZE_UNIT->name != NULL; SIZE_UNIT++)
	{
		long		bits;
		long long	abs_size = size < 0 ?
0 - size : size; if (SIZE_UNIT[1].name == NULL || abs_size < SIZE_UNIT->limit) { if (SIZE_UNIT->round) size = half_rounded(size); snprintf(buf, sizeof(buf), "%lld %s", size, SIZE_UNIT->name); break; } bits = (SIZE_UNIT[1].unitbits - SIZE_UNIT->unitbits - (SIZE_UNIT[1].round == true) + (SIZE_UNIT->round == true)); size /= 1 << bits; } return(buf); } /* * Display metrics with a unit */ char *pg_nosize_pretty(long long size) { char *buf; const struct nosize_pretty_unit *SIZE_UNIT; buf = malloc( sizeof(char) * (64+1)); for (SIZE_UNIT = nosize_pretty_units; SIZE_UNIT->name != NULL; SIZE_UNIT++) { if (SIZE_UNIT[1].name == NULL || size < SIZE_UNIT->limit) { snprintf(buf, sizeof(buf), "%lld %s", size, SIZE_UNIT->name); break; } size = size / SIZE_UNIT->divider; } return(buf); } /* * Format a long long value as a string */ void format(char *r, long long value, long length, unit_t unit) { char v[64] = ""; // check if pretty print if (unit == NO_UNIT) { sprintf(v, "%lld", value); } else { long long abs_value = value < 0 ? 0 - value : value; sprintf(v, "%s%s", value < 0 ? "-":"", unit == SIZE_UNIT ? pg_size_pretty(abs_value) : pg_nosize_pretty(abs_value) ); } // check for overflow if (length < strlen(v)) { // Overflow! 
sprintf(v, "!OF!"); } // initialize with empty string strcpy(r, ""); // add spaces for(long i=0; i'%s' " "FROM pg_stat_archiver ", previous_pgstatarchiver->stats_reset); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_warning("query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); pg_log_error("query was: %s", sql); exit(EXIT_FAILURE); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { column = 0; /* getting new values */ archived_count = atol(PQgetvalue(res, row, column++)); failed_count = atol(PQgetvalue(res, row, column++)); stats_reset = PQgetvalue(res, row, column++); has_been_reset = strcmp(PQgetvalue(res, row, column++), "f") && strcmp(previous_pgstatarchiver->stats_reset, PGSTAT_OLDEST_STAT_RESET); if (has_been_reset) { (void)printf("pg_stat_archiver has been reset!\n"); } /* printing the diff... * note that the first line will be the current value, rather than the diff */ format(r_archived_count, archived_count - previous_pgstatarchiver->archived_count, 8, NO_UNIT); format(r_failed_count, failed_count - previous_pgstatarchiver->failed_count, 8, NO_UNIT); (void)printf(" %s %s\n", r_archived_count, r_failed_count); /* setting the new old value */ previous_pgstatarchiver->archived_count = archived_count; previous_pgstatarchiver->failed_count = failed_count; previous_pgstatarchiver->stats_reset = stats_reset; } /* cleanup */ free(r_archived_count); free(r_failed_count); PQclear(res); } /* * dump all bgwriter stats. 
*/ void print_pgstatbgwriter() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row, column; long buffers_clean = 0; long maxwritten_clean = 0; long buffers_alloc = 0; char *stats_reset; bool has_been_reset; char *r_buffers_clean = (char *)malloc(sizeof(char) * (10 + 1)); char *r_maxwritten_clean = (char *)malloc(sizeof(char) * (10 + 1)); char *r_buffers_alloc = (char *)malloc(sizeof(char) * (10 + 1)); /* grab the stats (this is the only stats on one line) */ snprintf(sql, sizeof(sql), "select buffers_clean, maxwritten_clean, buffers_alloc, stats_reset, stats_reset>'%s' " "from pg_stat_bgwriter ", previous_pgstatbgwriter->stats_reset); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_warning("query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); pg_log_error("query was: %s", sql); exit(EXIT_FAILURE); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { column = 0; /* getting new values */ buffers_clean = atol(PQgetvalue(res, row, column++)); maxwritten_clean = atol(PQgetvalue(res, row, column++)); buffers_alloc = atol(PQgetvalue(res, row, column++)); stats_reset = PQgetvalue(res, row, column++); has_been_reset = strcmp(PQgetvalue(res, row, column++), "f") && strcmp(previous_pgstatbgwriter->stats_reset, PGSTAT_OLDEST_STAT_RESET); if (has_been_reset) { (void)printf("pg_stat_bgwriter has been reset!\n"); } /* printing the diff... * note that the first line will be the current value, rather than the diff */ format(r_buffers_clean, buffers_clean - previous_pgstatbgwriter->buffers_clean, 10, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_maxwritten_clean, buffers_alloc - previous_pgstatbgwriter->buffers_alloc, 10, opts->human_readable ? 
ALL_UNIT : NO_UNIT); format(r_buffers_alloc, maxwritten_clean - previous_pgstatbgwriter->maxwritten_clean, 10, opts->human_readable ? ALL_UNIT : NO_UNIT); (void)printf(" %s %s %s\n", r_buffers_clean, r_maxwritten_clean, r_buffers_alloc); /* setting the new old value */ previous_pgstatbgwriter->buffers_clean = buffers_clean; previous_pgstatbgwriter->maxwritten_clean = maxwritten_clean; previous_pgstatbgwriter->buffers_alloc = buffers_alloc; previous_pgstatbgwriter->stats_reset = stats_reset; } /* cleanup */ free(r_buffers_clean); free(r_maxwritten_clean); free(r_buffers_alloc); PQclear(res); } /* * dump all checkpointer stats. */ void print_pgstatcheckpointer() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row, column; long checkpoints_timed = 0; long checkpoints_requested = 0; long restartpoints_timed = 0; long restartpoints_requested = 0; long restartpoints_done = 0; long write_time = 0; long sync_time = 0; long buffers_written = 0; char *stats_reset; bool has_been_reset; char *r_checkpoints_timed = (char *)malloc(sizeof(char) * (9 + 1)); char *r_checkpoints_requested = (char *)malloc(sizeof(char) * (9 + 1)); char *r_restartpoints_timed = (char *)malloc(sizeof(char) * (9 + 1)); char *r_restartpoints_requested = (char *)malloc(sizeof(char) * (9 + 1)); char *r_restartpoints_done = (char *)malloc(sizeof(char) * (9 + 1)); char *r_write_time = (char *)malloc(sizeof(char) * (6 + 1)); char *r_sync_time = (char *)malloc(sizeof(char) * (6 + 1)); char *r_buffers_written = (char *)malloc(sizeof(char) * (7 + 1)); /* grab the stats (this is the only stats on one line) */ if (backend_minimum_version(17, 0)) { snprintf(sql, sizeof(sql), "select num_timed, num_requested, restartpoints_timed, restartpoints_req, " "restartpoints_done, write_time, sync_time, buffers_written, " "stats_reset, stats_reset>'%s' " "from pg_stat_checkpointer ", previous_pgstatcheckpointer->stats_reset); } else { snprintf(sql, sizeof(sql), "select checkpoints_timed, checkpoints_req, 
%sbuffers_checkpoint, " "stats_reset, stats_reset>'%s' " "from pg_stat_bgwriter ", backend_minimum_version(9, 2) ? "checkpoint_write_time, checkpoint_sync_time, " : "", previous_pgstatcheckpointer->stats_reset); } /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_warning("query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); pg_log_error("query was: %s", sql); exit(EXIT_FAILURE); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { column = 0; /* getting new values */ checkpoints_timed = atol(PQgetvalue(res, row, column++)); checkpoints_requested = atol(PQgetvalue(res, row, column++)); if (backend_minimum_version(17, 0)) { restartpoints_timed = atol(PQgetvalue(res, row, column++)); restartpoints_requested = atol(PQgetvalue(res, row, column++)); restartpoints_done = atol(PQgetvalue(res, row, column++)); } if (backend_minimum_version(9, 2)) { write_time = atol(PQgetvalue(res, row, column++)); sync_time = atol(PQgetvalue(res, row, column++)); } buffers_written = atol(PQgetvalue(res, row, column++)); stats_reset = PQgetvalue(res, row, column++); has_been_reset = strcmp(PQgetvalue(res, row, column++), "f") && strcmp(previous_pgstatcheckpointer->stats_reset, PGSTAT_OLDEST_STAT_RESET); if (has_been_reset) { (void)printf("pg_stat_%s has been reset!\n", backend_minimum_version(17, 0) ? "checkpointer" : "bgwriter"); } /* printing the diff... * note that the first line will be the current value, rather than the diff */ format(r_checkpoints_timed, checkpoints_timed - previous_pgstatcheckpointer->checkpoints_timed, 9, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_checkpoints_requested, checkpoints_requested - previous_pgstatcheckpointer->checkpoints_requested, 9, opts->human_readable ? 
ALL_UNIT : NO_UNIT); format(r_restartpoints_timed, restartpoints_timed - previous_pgstatcheckpointer->restartpoints_timed, 9, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_restartpoints_requested, restartpoints_requested - previous_pgstatcheckpointer->restartpoints_requested, 9, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_restartpoints_done, restartpoints_done - previous_pgstatcheckpointer->restartpoints_done, 9, opts->human_readable ? ALL_UNIT : NO_UNIT); format_time(r_write_time, write_time - previous_pgstatcheckpointer->write_time, 6); format_time(r_sync_time, sync_time - previous_pgstatcheckpointer->sync_time, 6); format(r_buffers_written, buffers_written - previous_pgstatcheckpointer->buffers_written, 7, opts->human_readable ? ALL_UNIT : NO_UNIT); (void)printf(" %s %s", r_checkpoints_timed, r_checkpoints_requested); if (backend_minimum_version(17, 0)) { (void)printf(" %s %s %s", r_restartpoints_timed, r_restartpoints_requested, r_restartpoints_done ); } if (backend_minimum_version(9, 2)) { (void)printf(" %s %s", r_write_time, r_sync_time ); } (void)printf(" %s\n", r_buffers_written ); /* setting the new old value */ previous_pgstatcheckpointer->checkpoints_timed = checkpoints_timed; previous_pgstatcheckpointer->checkpoints_requested = checkpoints_requested; previous_pgstatcheckpointer->restartpoints_timed = restartpoints_timed; previous_pgstatcheckpointer->restartpoints_requested = restartpoints_requested; previous_pgstatcheckpointer->restartpoints_done = restartpoints_requested; previous_pgstatcheckpointer->write_time = write_time; previous_pgstatcheckpointer->sync_time = sync_time; previous_pgstatcheckpointer->buffers_written = buffers_written; previous_pgstatcheckpointer->stats_reset = stats_reset; } /* cleanup */ free(r_checkpoints_timed); free(r_checkpoints_requested); free(r_restartpoints_timed); free(r_restartpoints_requested); free(r_restartpoints_done); free(r_write_time); free(r_sync_time); free(r_buffers_written); PQclear(res); } /* 
 * Dump all connection stats.
 *
 * Counts client sessions by state (active, waiting on an event, idle in
 * transaction, idle) from pg_stat_activity, with a version-appropriate
 * query: 10+ filters on backend_type, 9.6+ uses wait_event, older versions
 * use the boolean waiting column.  Prints absolute values (not diffs).
 */
void print_pgstatconnection()
{
	char sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult *res;
	int nrows;
	int row, column;

	long total = 0;
	long active = 0;
	long lockwaiting = 0;
	long idleintransaction = 0;
	long idle = 0;

	/* one output buffer per column, sized for format(..., 5, ...) plus NUL */
	char *r_total = (char *)malloc(sizeof(char) * (5 + 1));
	char *r_active = (char *)malloc(sizeof(char) * (5 + 1));
	char *r_lockwaiting = (char *)malloc(sizeof(char) * (5 + 1));
	char *r_idleintransaction = (char *)malloc(sizeof(char) * (5 + 1));
	char *r_idle = (char *)malloc(sizeof(char) * (5 + 1));

	if (backend_minimum_version(10, 0))
	{
		snprintf(sql, sizeof(sql),
				 "SELECT count(*) AS total, "
				 " sum(CASE WHEN state='active' AND wait_event IS NULL "
				 "THEN 1 ELSE 0 END) AS active, "
				 " sum(CASE WHEN state='active' AND wait_event IS NOT NULL "
				 "THEN 1 ELSE 0 END) AS lockwaiting, "
				 " sum(CASE WHEN state='idle in transaction' THEN 1 ELSE 0 END) AS idleintransaction, "
				 " sum(CASE WHEN state='idle' THEN 1 ELSE 0 END) AS idle "
				 "FROM pg_stat_activity "
				 "WHERE backend_type='client backend'");
	}
	else if (backend_minimum_version(9, 6))
	{
		snprintf(sql, sizeof(sql),
				 "SELECT count(*) AS total, "
				 " sum(CASE WHEN state='active' AND wait_event IS NULL THEN 1 ELSE 0 END) AS active, "
				 " sum(CASE WHEN state='active' AND wait_event IS NOT NULL THEN 1 ELSE 0 END) AS lockwaiting, "
				 " sum(CASE WHEN state='idle in transaction' THEN 1 ELSE 0 END) AS idleintransaction, "
				 " sum(CASE WHEN state='idle' THEN 1 ELSE 0 END) AS idle "
				 "FROM pg_stat_activity");
	}
	else
	{
		snprintf(sql, sizeof(sql),
				 "SELECT count(*) AS total, "
				 " sum(CASE WHEN state='active' AND NOT waiting THEN 1 ELSE 0 END) AS active, "
				 " sum(CASE WHEN waiting THEN 1 ELSE 0 END) AS lockwaiting, "
				 " sum(CASE WHEN state='idle in transaction' THEN 1 ELSE 0 END) AS idleintransaction, "
				 " sum(CASE WHEN state='idle' THEN 1 ELSE 0 END) AS idle "
				 "FROM pg_stat_activity");
	}

	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		pg_log_warning("query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		pg_log_error("query was: %s", sql);
		exit(EXIT_FAILURE);
	}

	/* get the number of fields */
	nrows = PQntuples(res);

	/* for each row, dump the information */
	/* this is stupid, a simple if would do the trick, but it will help for other cases */
	for (row = 0; row < nrows; row++)
	{
		column = 0;

		total = atol(PQgetvalue(res, row, column++));
		active = atol(PQgetvalue(res, row, column++));
		lockwaiting = atol(PQgetvalue(res, row, column++));
		idleintransaction = atol(PQgetvalue(res, row, column++));
		idle = atol(PQgetvalue(res, row, column++));

		/* printing the actual values for once */
		format(r_total, total, 5, NO_UNIT);
		format(r_active, active, 5, NO_UNIT);
		format(r_lockwaiting, lockwaiting, 5, NO_UNIT);
		format(r_idleintransaction, idleintransaction, 5, NO_UNIT);
		format(r_idle, idle, 5, NO_UNIT);

		(void)printf(" %s   %s  %s   %s %s\n",
			r_total,
			r_active,
			r_lockwaiting,
			r_idleintransaction,
			r_idle);
	}

	/* cleanup */
	free(r_total);
	free(r_active);
	free(r_lockwaiting);
	free(r_idleintransaction);
	free(r_idle);
	PQclear(res);
}

/*
 * Dump all database stats.
 */
void print_pgstatdatabase()
{
	char sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult *res;
	const char *paramValues[1];
	int nrows;
	int row, column;

	long numbackends = 0;
	long xact_commit = 0;
	long xact_rollback = 0;
	long blks_read = 0;
	long blks_hit = 0;
	long tup_returned = 0;
	long tup_fetched = 0;
	long tup_inserted = 0;
	long tup_updated = 0;
	long tup_deleted = 0;
	long conflicts = 0;
	long temp_files = 0;
	long temp_bytes = 0;
	long deadlocks = 0;
	long checksum_failures = 0;
	float blk_read_time = 0;
	float blk_write_time = 0;
	float session_time = 0;
	float active_time = 0;
	float idle_in_transaction_time = 0;
	long sessions = 0;
	long sessions_abandoned = 0;
	long sessions_fatal = 0;
	long sessions_killed = 0;
	char *stats_reset;
	bool has_been_reset;
	float hit_ratio;

	/* scratch output buffers reused across the per-substat print sections */
	char *r1 = (char *)malloc(sizeof(char) * (12 + 1));
	char *r2 = (char *)malloc(sizeof(char) * (12 + 1));
	char *r3 = (char *)malloc(sizeof(char) * (12 + 1));
	char *r4 = (char *)malloc(sizeof(char) * (12 + 1));
	char *r5 = (char *)malloc(sizeof(char) * (12 + 1));
	char *r6 = (char *)malloc(sizeof(char) * (12 + 1));
	char *r7 = (char *)malloc(sizeof(char) * (12 + 1));

	/*
	 * With a filter, we assume we'll get only one row.
	 * Without, we sum all the fields to get one row.
	 *
	 * Optional column groups are appended per server version; the parse loop
	 * below must consume columns in exactly this order.
	 */
	if (opts->filter == NULL)
	{
		snprintf(sql, sizeof(sql),
				 "SELECT sum(numbackends), sum(xact_commit), sum(xact_rollback), sum(blks_read), sum(blks_hit)"
				 ", max(stats_reset), max(stats_reset)>'%s'"
				 "%s%s%s%s%s "
				 "FROM pg_stat_database ",
				 previous_pgstatdatabase->stats_reset,
				 backend_minimum_version(8, 3) ? ", sum(tup_returned), sum(tup_fetched), sum(tup_inserted), sum(tup_updated), sum(tup_deleted)" : "",
				 backend_minimum_version(9, 1) ? ", sum(conflicts)" : "",
				 backend_minimum_version(9, 2) ? ", sum(temp_files), sum(temp_bytes), sum(deadlocks), sum(blk_read_time), sum(blk_write_time)" : "",
				 backend_minimum_version(12, 0) ? ", sum(checksum_failures)" : "",
				 backend_minimum_version(14, 0) ? ", sum(session_time), sum(active_time), sum(idle_in_transaction_time), sum(sessions), sum(sessions_abandoned), sum(sessions_fatal), sum(sessions_killed)" : "");

		res = PQexec(conn, sql);
	}
	else
	{
		snprintf(sql, sizeof(sql),
				 "SELECT numbackends, xact_commit, xact_rollback, blks_read, blks_hit"
				 ", stats_reset, stats_reset>'%s'"
				 "%s%s%s%s%s "
				 "FROM pg_stat_database "
				 "WHERE datname=$1",
				 previous_pgstatdatabase->stats_reset,
				 backend_minimum_version(8, 3) ? ", tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted" : "",
				 backend_minimum_version(9, 1) ? ", conflicts" : "",
				 backend_minimum_version(9, 2) ? ", temp_files, temp_bytes, deadlocks, blk_read_time, blk_write_time" : "",
				 backend_minimum_version(12, 0) ? ", checksum_failures" : "",
				 backend_minimum_version(14, 0) ? ", session_time, active_time, idle_in_transaction_time, sessions, sessions_abandoned, sessions_fatal, sessions_killed" : "");

		/* NOTE(review): this pg_strdup is never freed — leaks once per call */
		paramValues[0] = pg_strdup(opts->filter);
		res = PQexecParams(conn,
						   sql,
						   1,       /* one param */
						   NULL,    /* let the backend deduce param type */
						   paramValues,
						   NULL,    /* don't need param lengths since text */
						   NULL,    /* default to all text params */
						   0);      /* ask for text results */
	}

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		pg_log_warning("query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		pg_log_error("query was: %s", sql);
		exit(EXIT_FAILURE);
	}

	/* get the number of fields */
	nrows = PQntuples(res);

	/* for each row, dump the information */
	/* this is stupid, a simple if would do the trick, but it will help for other cases */
	for (row = 0; row < nrows; row++)
	{
		column = 0;

		/* getting new values (must mirror the SELECT column order above) */
		numbackends = atol(PQgetvalue(res, row, column++));
		xact_commit = atol(PQgetvalue(res, row, column++));
		xact_rollback = atol(PQgetvalue(res, row, column++));
		blks_read = atol(PQgetvalue(res, row, column++));
		blks_hit = atol(PQgetvalue(res, row, column++));
		stats_reset = PQgetvalue(res, row, column++);
		has_been_reset = strcmp(PQgetvalue(res, row, column++), "f")
			&& strcmp(previous_pgstatdatabase->stats_reset, PGSTAT_OLDEST_STAT_RESET);
		if (backend_minimum_version(8, 3))
		{
			tup_returned = atol(PQgetvalue(res, row, column++));
			tup_fetched = atol(PQgetvalue(res, row, column++));
			tup_inserted = atol(PQgetvalue(res, row, column++));
			tup_updated = atol(PQgetvalue(res, row, column++));
			tup_deleted = atol(PQgetvalue(res, row, column++));
		}
		if (backend_minimum_version(9, 1))
		{
			conflicts = atol(PQgetvalue(res, row, column++));
		}
		if (backend_minimum_version(9, 2))
		{
			temp_files = atol(PQgetvalue(res, row, column++));
			temp_bytes = atol(PQgetvalue(res, row, column++));
			deadlocks = atol(PQgetvalue(res, row, column++));
			blk_read_time = atof(PQgetvalue(res, row, column++));
			blk_write_time = atof(PQgetvalue(res, row, column++));
		}
		if (backend_minimum_version(12, 0))
		{
			checksum_failures = atol(PQgetvalue(res, row, column++));
		}
		if (backend_minimum_version(14, 0))
		{
			session_time = atof(PQgetvalue(res, row, column++));
			active_time = atof(PQgetvalue(res, row, column++));
			idle_in_transaction_time = atof(PQgetvalue(res, row, column++));
			sessions = atol(PQgetvalue(res, row, column++));
			sessions_abandoned = atol(PQgetvalue(res, row, column++));
			sessions_fatal = atol(PQgetvalue(res, row, column++));
			sessions_killed = atol(PQgetvalue(res, row, column++));
		}

		if (has_been_reset)
		{
			(void)printf("pg_stat_database has been reset!\n");
		}

		// calculate hit ratio
		if (blks_hit - previous_pgstatdatabase->blks_hit + blks_read - previous_pgstatdatabase->blks_read > 0)
			hit_ratio = 100.*(blks_hit - previous_pgstatdatabase->blks_hit)/(blks_hit - previous_pgstatdatabase->blks_hit + blks_read - previous_pgstatdatabase->blks_read);
		else
			hit_ratio = 0;

		/* printing the diff...
		 * note that the first line will be the current value, rather than the diff
		 * Each section is emitted only when selected by the --substat option
		 * (NULL means "all sections"). */
		if (opts->substat == NULL || strstr(opts->substat, "backends") != NULL)
		{
			format(r1, numbackends, 8, NO_UNIT);
			(void)printf(" %s", r1);
		}
		if (opts->substat == NULL || strstr(opts->substat, "xacts") != NULL)
		{
			format(r1, xact_commit - previous_pgstatdatabase->xact_commit, 8,
				opts->human_readable ? ALL_UNIT : NO_UNIT);
			format(r2, xact_rollback - previous_pgstatdatabase->xact_rollback, 8,
				opts->human_readable ? ALL_UNIT : NO_UNIT);
			(void)printf(" %s %s", r1, r2);
		}
		if (opts->substat == NULL || strstr(opts->substat, "blocks") != NULL)
		{
			format(r1, blks_read - previous_pgstatdatabase->blks_read, 10,
				opts->human_readable ? ALL_UNIT : NO_UNIT);
			format(r2, blks_hit - previous_pgstatdatabase->blks_hit, 10,
				opts->human_readable ? ALL_UNIT : NO_UNIT);
			/* NOTE(review): hit_ratio is a float but format() takes a long
			 * long, so the percentage is truncated to an integer */
			format(r3, hit_ratio, 5, NO_UNIT);
			(void)printf(" %s %s %s", r1, r2, r3);
			if (backend_minimum_version(9, 2))
			{
				format_time(r4, blk_read_time - previous_pgstatdatabase->blk_read_time, 9);
				format_time(r5, blk_write_time - previous_pgstatdatabase->blk_write_time, 9);
				(void)printf(" %s %s", r4, r5);
			}
		}
		if ((opts->substat == NULL || strstr(opts->substat, "tuples") != NULL) && backend_minimum_version(8, 3))
		{
			format(r1, tup_returned - previous_pgstatdatabase->tup_returned, 6,
				opts->human_readable ? ALL_UNIT : NO_UNIT);
			format(r2, tup_fetched - previous_pgstatdatabase->tup_fetched, 6,
				opts->human_readable ? ALL_UNIT : NO_UNIT);
			format(r3, tup_inserted - previous_pgstatdatabase->tup_inserted, 6,
				opts->human_readable ? ALL_UNIT : NO_UNIT);
			format(r4, tup_updated - previous_pgstatdatabase->tup_updated, 6,
				opts->human_readable ? ALL_UNIT : NO_UNIT);
			format(r5, tup_deleted - previous_pgstatdatabase->tup_deleted, 6,
				opts->human_readable ? ALL_UNIT : NO_UNIT);
			(void)printf(" %s %s %s %s %s", r1, r2, r3, r4, r5);
		}
		if ((opts->substat == NULL || strstr(opts->substat, "temp") != NULL) && backend_minimum_version(9, 2))
		{
			format(r1, temp_files - previous_pgstatdatabase->temp_files, 6,
				opts->human_readable ? ALL_UNIT : NO_UNIT);
			format(r2, temp_bytes - previous_pgstatdatabase->temp_bytes, 6,
				opts->human_readable ? ALL_UNIT : NO_UNIT);
			(void)printf(" %s %s", r1, r2);
		}
		if ((opts->substat == NULL || strstr(opts->substat, "session") != NULL) && backend_minimum_version(14, 0))
		{
			format_time(r1, session_time - previous_pgstatdatabase->session_time, 11);
			format_time(r2, active_time - previous_pgstatdatabase->active_time, 11);
			format_time(r3, idle_in_transaction_time - previous_pgstatdatabase->idle_in_transaction_time, 11);
			format(r4, sessions - previous_pgstatdatabase->sessions, 7,
				opts->human_readable ? ALL_UNIT : NO_UNIT);
			format(r5, sessions_abandoned - previous_pgstatdatabase->sessions_abandoned, 6,
				opts->human_readable ? ALL_UNIT : NO_UNIT);
			format(r6, sessions_fatal - previous_pgstatdatabase->sessions_fatal, 6,
				opts->human_readable ? ALL_UNIT : NO_UNIT);
			format(r7, sessions_killed - previous_pgstatdatabase->sessions_killed, 6,
				opts->human_readable ? ALL_UNIT : NO_UNIT);
			(void)printf(" %s %s %s %s %s %s %s ", r1, r2, r3, r4, r5, r6, r7);
		}
		if ((opts->substat == NULL || strstr(opts->substat, "misc") != NULL) && backend_minimum_version(9, 1))
		{
			if (backend_minimum_version(9, 1))
			{
				format(r1, conflicts - previous_pgstatdatabase->conflicts, 9,
					opts->human_readable ? ALL_UNIT : NO_UNIT);
				(void)printf(" %s", r1);
			}
			if (backend_minimum_version(9, 2))
			{
				format(r2, deadlocks - previous_pgstatdatabase->deadlocks, 9,
					opts->human_readable ? ALL_UNIT : NO_UNIT);
				(void)printf(" %s", r2);
			}
			if (backend_minimum_version(12, 0))
			{
				format(r3, checksum_failures - previous_pgstatdatabase->checksum_failures, 9,
					opts->human_readable ? ALL_UNIT : NO_UNIT);
				(void)printf(" %s", r3);
			}
		}
		(void)printf("\n");

		/* setting the new old value */
		previous_pgstatdatabase->xact_commit = xact_commit;
		previous_pgstatdatabase->xact_rollback = xact_rollback;
		previous_pgstatdatabase->blks_read = blks_read;
		previous_pgstatdatabase->blks_hit = blks_hit;
		previous_pgstatdatabase->tup_returned = tup_returned;
		previous_pgstatdatabase->tup_fetched = tup_fetched;
		previous_pgstatdatabase->tup_inserted = tup_inserted;
		previous_pgstatdatabase->tup_updated = tup_updated;
		previous_pgstatdatabase->tup_deleted = tup_deleted;
		previous_pgstatdatabase->conflicts = conflicts;
		previous_pgstatdatabase->temp_files = temp_files;
		previous_pgstatdatabase->temp_bytes = temp_bytes;
		previous_pgstatdatabase->deadlocks = deadlocks;
		previous_pgstatdatabase->blk_read_time = blk_read_time;
		previous_pgstatdatabase->blk_write_time = blk_write_time;
		previous_pgstatdatabase->checksum_failures = checksum_failures;
		previous_pgstatdatabase->session_time = session_time;
		previous_pgstatdatabase->active_time = active_time;
		previous_pgstatdatabase->idle_in_transaction_time = idle_in_transaction_time;
		previous_pgstatdatabase->sessions = sessions;
		previous_pgstatdatabase->sessions_abandoned = sessions_abandoned;
		previous_pgstatdatabase->sessions_fatal = sessions_fatal;
		previous_pgstatdatabase->sessions_killed = sessions_killed;
		/* stats_reset can be empty before the first reset — fall back to the
		 * sentinel so the next comparison stays valid */
		if (strlen(stats_reset) == 0)
			previous_pgstatdatabase->stats_reset = PGSTAT_OLDEST_STAT_RESET;
		else
			previous_pgstatdatabase->stats_reset = stats_reset;
	}

	/* cleanup */
	free(r1);
	free(r2);
	free(r3);
	free(r4);
	free(r5);
	free(r6);
	free(r7);
	PQclear(res);
}

/*
 * Dump all table stats.
*/ void print_pgstattable() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; const char *paramValues[1]; int nrows; int row, column; long seq_scan = 0; long seq_tup_read = 0; long idx_scan = 0; long idx_tup_fetch = 0; long n_tup_ins = 0; long n_tup_upd = 0; long n_tup_del = 0; long n_tup_hot_upd = 0; long n_tup_newpage_upd = 0; long n_live_tup = 0; long n_dead_tup = 0; long n_mod_since_analyze = 0; long n_ins_since_vacuum = 0; long vacuum_count = 0; long autovacuum_count = 0; long analyze_count = 0; long autoanalyze_count = 0; char *r_seq_scan = (char *)malloc(sizeof(char) * (6 + 1)); char *r_seq_tup_read = (char *)malloc(sizeof(char) * (6 + 1)); char *r_idx_scan = (char *)malloc(sizeof(char) * (6 + 1)); char *r_idx_tup_fetch = (char *)malloc(sizeof(char) * (6 + 1)); char *r_n_tup_ins = (char *)malloc(sizeof(char) * (6 + 1)); char *r_n_tup_upd = (char *)malloc(sizeof(char) * (6 + 1)); char *r_n_tup_del = (char *)malloc(sizeof(char) * (6 + 1)); char *r_n_tup_hot_upd = (char *)malloc(sizeof(char) * (6 + 1)); char *r_n_tup_newpage_upd = (char *)malloc(sizeof(char) * (6 + 1)); char *r_n_live_tup = (char *)malloc(sizeof(char) * (6 + 1)); char *r_n_dead_tup = (char *)malloc(sizeof(char) * (6 + 1)); char *r_n_mod_since_analyze = (char *)malloc(sizeof(char) * (6 + 1)); char *r_n_ins_since_vacuum = (char *)malloc(sizeof(char) * (6 + 1)); char *r_vacuum_count = (char *)malloc(sizeof(char) * (6 + 1)); char *r_autovacuum_count = (char *)malloc(sizeof(char) * (6 + 1)); char *r_analyze_count = (char *)malloc(sizeof(char) * (6 + 1)); char *r_autoanalyze_count = (char *)malloc(sizeof(char) * (6 + 1)); /* * With a filter, we assume we'll get only one row. * Without, we sum all the fields to get one row. 
*/ if (opts->filter == NULL) { snprintf(sql, sizeof(sql), "SELECT sum(seq_scan), sum(seq_tup_read), sum(idx_scan), sum(idx_tup_fetch), sum(n_tup_ins), " "sum(n_tup_upd), sum(n_tup_del)" "%s" "%s" "%s" "%s" "%s" " FROM pg_stat_all_tables " "WHERE schemaname <> 'information_schema' ", backend_minimum_version(16, 0) ? ", sum(n_tup_newpage_upd)" : "", backend_minimum_version(8, 3) ? ", sum(n_tup_hot_upd), sum(n_live_tup), sum(n_dead_tup)" : "", backend_minimum_version(9, 4) ? ", sum(n_mod_since_analyze)" : "", backend_minimum_version(13, 0) ? ", sum(n_ins_since_vacuum)" : "", backend_minimum_version(9, 1) ? ", sum(vacuum_count), sum(autovacuum_count), sum(analyze_count), sum(autoanalyze_count)" : ""); res = PQexec(conn, sql); } else { snprintf(sql, sizeof(sql), "SELECT sum(seq_scan), sum(seq_tup_read), sum(idx_scan), sum(idx_tup_fetch), sum(n_tup_ins), " "sum(n_tup_upd), sum(n_tup_del)" "%s" "%s" "%s" "%s" "%s" " FROM pg_stat_all_tables " "WHERE schemaname <> 'information_schema' " " AND relname = $1", backend_minimum_version(16, 0) ? ", sum(n_tup_newpage_upd)" : "", backend_minimum_version(8, 3) ? ", sum(n_tup_hot_upd), sum(n_live_tup), sum(n_dead_tup)" : "", backend_minimum_version(9, 4) ? ", sum(n_mod_since_analyze)" : "", backend_minimum_version(13, 0) ? ", sum(n_ins_since_vacuum)" : "", backend_minimum_version(9, 1) ? 
", sum(vacuum_count), sum(autovacuum_count), sum(analyze_count), sum(autoanalyze_count)" : ""); paramValues[0] = pg_strdup(opts->filter); res = PQexecParams(conn, sql, 1, /* one param */ NULL, /* let the backend deduce param type */ paramValues, NULL, /* don't need param lengths since text */ NULL, /* default to all text params */ 0); /* ask for text results */ } /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_warning("query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); pg_log_error("query was: %s", sql); exit(EXIT_FAILURE); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { column = 0; /* getting new values */ seq_scan = atol(PQgetvalue(res, row, column++)); seq_tup_read = atol(PQgetvalue(res, row, column++)); idx_scan = atol(PQgetvalue(res, row, column++)); idx_tup_fetch = atol(PQgetvalue(res, row, column++)); n_tup_ins = atol(PQgetvalue(res, row, column++)); n_tup_upd = atol(PQgetvalue(res, row, column++)); n_tup_del = atol(PQgetvalue(res, row, column++)); if (backend_minimum_version(16, 0)) { n_tup_newpage_upd = atol(PQgetvalue(res, row, column++)); } if (backend_minimum_version(8, 3)) { n_tup_hot_upd = atol(PQgetvalue(res, row, column++)); n_live_tup = atol(PQgetvalue(res, row, column++)); n_dead_tup = atol(PQgetvalue(res, row, column++)); } if (backend_minimum_version(9, 4)) { n_mod_since_analyze = atol(PQgetvalue(res, row, column++)); } if (backend_minimum_version(13, 0)) { n_ins_since_vacuum = atol(PQgetvalue(res, row, column++)); } if (backend_minimum_version(9, 1)) { vacuum_count = atol(PQgetvalue(res, row, column++)); autovacuum_count = atol(PQgetvalue(res, row, column++)); analyze_count = atol(PQgetvalue(res, row, column++)); autoanalyze_count = atol(PQgetvalue(res, row, column++)); } /* printing the diff... 
note that the first line will be the current value, rather than the diff */ format(r_seq_scan, seq_scan - previous_pgstattable->seq_scan, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_seq_tup_read, seq_tup_read - previous_pgstattable->seq_tup_read, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_idx_scan, idx_scan - previous_pgstattable->idx_scan, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_idx_tup_fetch, idx_tup_fetch - previous_pgstattable->idx_tup_fetch, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_n_tup_ins, n_tup_ins - previous_pgstattable->n_tup_ins, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_n_tup_upd, n_tup_upd - previous_pgstattable->n_tup_upd, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_n_tup_del, n_tup_del - previous_pgstattable->n_tup_del, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_n_tup_hot_upd, n_tup_hot_upd - previous_pgstattable->n_tup_hot_upd, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_n_tup_newpage_upd, n_tup_newpage_upd - previous_pgstattable->n_tup_newpage_upd, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_n_live_tup, n_live_tup - previous_pgstattable->n_live_tup, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_n_dead_tup, n_dead_tup - previous_pgstattable->n_dead_tup, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_n_mod_since_analyze, n_mod_since_analyze - previous_pgstattable->n_mod_since_analyze, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_n_ins_since_vacuum, n_ins_since_vacuum - previous_pgstattable->n_ins_since_vacuum, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_vacuum_count, vacuum_count - previous_pgstattable->vacuum_count, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_autovacuum_count, autovacuum_count - previous_pgstattable->autovacuum_count, 6, opts->human_readable ? 
ALL_UNIT : NO_UNIT); format(r_analyze_count, analyze_count - previous_pgstattable->analyze_count, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_autoanalyze_count, autoanalyze_count - previous_pgstattable->autoanalyze_count, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); (void)printf(" %s %s %s %s %s %s %s", r_seq_scan, r_seq_tup_read, r_idx_scan, r_idx_tup_fetch, r_n_tup_ins, r_n_tup_upd, r_n_tup_del ); if (backend_minimum_version(8, 3)) { (void)printf(" %s", r_n_tup_hot_upd ); } if (backend_minimum_version(16, 0)) { (void)printf(" %s", r_n_tup_newpage_upd ); } if (backend_minimum_version(8, 3)) { (void)printf(" %s %s", r_n_live_tup, r_n_dead_tup ); } if (backend_minimum_version(9, 4)) { (void)printf(" %s", r_n_mod_since_analyze ); } if (backend_minimum_version(13, 0)) { (void)printf(" %s", r_n_ins_since_vacuum ); } if (backend_minimum_version(9, 1)) { (void)printf(" %s %s %s %s", r_vacuum_count, r_autovacuum_count, r_analyze_count, r_autoanalyze_count ); } (void)printf("\n"); /* setting the new old value */ previous_pgstattable->seq_scan = seq_scan; previous_pgstattable->seq_tup_read = seq_tup_read; previous_pgstattable->idx_scan = idx_scan; previous_pgstattable->idx_tup_fetch = idx_tup_fetch; previous_pgstattable->n_tup_ins = n_tup_ins; previous_pgstattable->n_tup_upd = n_tup_upd; previous_pgstattable->n_tup_del = n_tup_del; previous_pgstattable->n_tup_hot_upd = n_tup_hot_upd; previous_pgstattable->n_tup_newpage_upd = n_tup_newpage_upd; previous_pgstattable->n_live_tup = n_live_tup; previous_pgstattable->n_dead_tup = n_dead_tup; previous_pgstattable->n_mod_since_analyze = n_mod_since_analyze; previous_pgstattable->n_ins_since_vacuum = n_ins_since_vacuum; previous_pgstattable->vacuum_count = vacuum_count; previous_pgstattable->autovacuum_count = autovacuum_count; previous_pgstattable->analyze_count = analyze_count; previous_pgstattable->autoanalyze_count = autoanalyze_count; } /* cleanup */ free(r_seq_scan); free(r_seq_tup_read); free(r_idx_scan); 
	/* cleanup of the per-counter formatting buffers */
	free(r_idx_tup_fetch);
	free(r_n_tup_ins);
	free(r_n_tup_upd);
	free(r_n_tup_del);
	free(r_n_tup_hot_upd);
	free(r_n_tup_newpage_upd);
	free(r_n_live_tup);
	free(r_n_dead_tup);
	free(r_n_mod_since_analyze);
	free(r_n_ins_since_vacuum);
	free(r_vacuum_count);
	free(r_autovacuum_count);
	free(r_analyze_count);
	free(r_autoanalyze_count);

	PQclear(res);
}

/*
 * Dump all table IO stats.
 *
 * Prints one line with the interval delta of the pg_statio_all_tables
 * counters (heap, index, TOAST and TOAST-index blocks read and hit).
 * With opts->filter set, only the table of that name is considered;
 * otherwise the counters are summed over all tables outside
 * information_schema.  The first line printed is the raw current value,
 * since previous_pgstattableio starts at zero.
 */
void print_pgstattableio()
{
	char sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult *res;
	const char *paramValues[1];
	int nrows;
	int row, column;

	long heap_blks_read = 0;
	long heap_blks_hit = 0;
	long idx_blks_read = 0;
	long idx_blks_hit = 0;
	long toast_blks_read = 0;
	long toast_blks_hit = 0;
	long tidx_blks_read = 0;
	long tidx_blks_hit = 0;

	/* formatted counter images: 8 visible chars + terminating NUL */
	char *r_heap_blks_read = (char *)malloc(sizeof(char) * (8 + 1));
	char *r_heap_blks_hit = (char *)malloc(sizeof(char) * (8 + 1));
	char *r_idx_blks_read = (char *)malloc(sizeof(char) * (8 + 1));
	char *r_idx_blks_hit = (char *)malloc(sizeof(char) * (8 + 1));
	char *r_toast_blks_read = (char *)malloc(sizeof(char) * (8 + 1));
	char *r_toast_blks_hit = (char *)malloc(sizeof(char) * (8 + 1));
	char *r_tidx_blks_read = (char *)malloc(sizeof(char) * (8 + 1));
	char *r_tidx_blks_hit = (char *)malloc(sizeof(char) * (8 + 1));

	/*
	 * With a filter, we assume we'll get only one row.
	 * Without, we sum all the fields to get one row.
	 */
	if (opts->filter == NULL)
	{
		snprintf(sql, sizeof(sql),
			"SELECT sum(heap_blks_read), sum(heap_blks_hit), sum(idx_blks_read), sum(idx_blks_hit), "
			"sum(toast_blks_read), sum(toast_blks_hit), sum(tidx_blks_read), sum(tidx_blks_hit) "
			"FROM pg_statio_all_tables "
			"WHERE schemaname <> 'information_schema' ");

		res = PQexec(conn, sql);
	}
	else
	{
		snprintf(sql, sizeof(sql),
			"SELECT heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit, "
			"toast_blks_read, toast_blks_hit, tidx_blks_read, tidx_blks_hit "
			"FROM pg_statio_all_tables "
			"WHERE schemaname <> 'information_schema' "
			" AND relname = $1");

		paramValues[0] = pg_strdup(opts->filter);

		res = PQexecParams(conn,
			sql,
			1,    /* one param */
			NULL, /* let the backend deduce param type */
			paramValues,
			NULL, /* don't need param lengths since text */
			NULL, /* default to all text params */
			0);   /* ask for text results */
	}

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		pg_log_warning("query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		pg_log_error("query was: %s", sql);
		exit(EXIT_FAILURE);
	}

	/* get the number of fields */
	nrows = PQntuples(res);

	/* for each row, dump the information */
	/* this is stupid, a simple if would do the trick, but it will help for other cases */
	for (row = 0; row < nrows; row++)
	{
		column = 0;

		/* getting new values */
		heap_blks_read = atol(PQgetvalue(res, row, column++));
		heap_blks_hit = atol(PQgetvalue(res, row, column++));
		idx_blks_read = atol(PQgetvalue(res, row, column++));
		idx_blks_hit = atol(PQgetvalue(res, row, column++));
		toast_blks_read = atol(PQgetvalue(res, row, column++));
		toast_blks_hit = atol(PQgetvalue(res, row, column++));
		tidx_blks_read = atol(PQgetvalue(res, row, column++));
		tidx_blks_hit = atol(PQgetvalue(res, row, column++));

		/* printing the diff...
		 * note that the first line will be the current value, rather than the diff */
		format(r_heap_blks_read, heap_blks_read - previous_pgstattableio->heap_blks_read, 8, opts->human_readable ? ALL_UNIT : NO_UNIT);
		format(r_heap_blks_hit, heap_blks_hit - previous_pgstattableio->heap_blks_hit, 8, opts->human_readable ? ALL_UNIT : NO_UNIT);
		format(r_idx_blks_read, idx_blks_read - previous_pgstattableio->idx_blks_read, 8, opts->human_readable ? ALL_UNIT : NO_UNIT);
		format(r_idx_blks_hit, idx_blks_hit - previous_pgstattableio->idx_blks_hit, 8, opts->human_readable ? ALL_UNIT : NO_UNIT);
		format(r_toast_blks_read, toast_blks_read - previous_pgstattableio->toast_blks_read, 8, opts->human_readable ? ALL_UNIT : NO_UNIT);
		format(r_toast_blks_hit, toast_blks_hit - previous_pgstattableio->toast_blks_hit, 8, opts->human_readable ? ALL_UNIT : NO_UNIT);
		format(r_tidx_blks_read, tidx_blks_read - previous_pgstattableio->tidx_blks_read, 8, opts->human_readable ? ALL_UNIT : NO_UNIT);
		format(r_tidx_blks_hit, tidx_blks_hit - previous_pgstattableio->tidx_blks_hit, 8, opts->human_readable ? ALL_UNIT : NO_UNIT);

		(void)printf(" %s %s %s %s %s %s %s %s\n",
			r_heap_blks_read,
			r_heap_blks_hit,
			r_idx_blks_read,
			r_idx_blks_hit,
			r_toast_blks_read,
			r_toast_blks_hit,
			r_tidx_blks_read,
			r_tidx_blks_hit
			);

		/* setting the new old value */
		previous_pgstattableio->heap_blks_read = heap_blks_read;
		previous_pgstattableio->heap_blks_hit = heap_blks_hit;
		previous_pgstattableio->idx_blks_read = idx_blks_read;
		previous_pgstattableio->idx_blks_hit = idx_blks_hit;
		previous_pgstattableio->toast_blks_read = toast_blks_read;
		previous_pgstattableio->toast_blks_hit = toast_blks_hit;
		previous_pgstattableio->tidx_blks_read = tidx_blks_read;
		previous_pgstattableio->tidx_blks_hit = tidx_blks_hit;
	}

	/* cleanup */
	free(r_heap_blks_read);
	free(r_heap_blks_hit);
	free(r_idx_blks_read);
	free(r_idx_blks_hit);
	free(r_toast_blks_read);
	free(r_toast_blks_hit);
	free(r_tidx_blks_read);
	free(r_tidx_blks_hit);

	PQclear(res);
}

/*
 * Dump all index stats.
*/ void print_pgstatindex() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; const char *paramValues[1]; int nrows; int row, column; long idx_scan = 0; long idx_tup_read = 0; long idx_tup_fetch = 0; char *r_idx_scan = (char *)malloc(sizeof(char) * (8 + 1)); char *r_idx_tup_read = (char *)malloc(sizeof(char) * (8 + 1)); char *r_idx_tup_fetch = (char *)malloc(sizeof(char) * (8 + 1)); /* * With a filter, we assume we'll get only one row. * Without, we sum all the fields to get one row. */ if (opts->filter == NULL) { snprintf(sql, sizeof(sql), "SELECT sum(idx_scan), sum(idx_tup_read), sum(idx_tup_fetch) " " FROM pg_stat_all_indexes " "WHERE schemaname <> 'information_schema' "); res = PQexec(conn, sql); } else { snprintf(sql, sizeof(sql), "SELECT idx_scan, idx_tup_read, idx_tup_fetch " "FROM pg_stat_all_indexes " "WHERE schemaname <> 'information_schema' " " AND indexrelname = $1"); paramValues[0] = pg_strdup(opts->filter); res = PQexecParams(conn, sql, 1, /* one param */ NULL, /* let the backend deduce param type */ paramValues, NULL, /* don't need param lengths since text */ NULL, /* default to all text params */ 0); /* ask for text results */ } /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_warning("query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); pg_log_error("query was: %s", sql); exit(EXIT_FAILURE); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { column = 0; /* getting new values */ idx_scan = atol(PQgetvalue(res, row, column++)); idx_tup_read = atof(PQgetvalue(res, row, column++)); idx_tup_fetch = atof(PQgetvalue(res, row, column++)); /* printing the diff... * note that the first line will be the current value, rather than the diff */ format(r_idx_scan, idx_scan - previous_pgstatindex->idx_scan, 8, opts->human_readable ? 
ALL_UNIT : NO_UNIT); format(r_idx_tup_read, idx_tup_read - previous_pgstatindex->idx_tup_read, 8, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_idx_tup_fetch, idx_tup_fetch - previous_pgstatindex->idx_tup_fetch, 8, opts->human_readable ? ALL_UNIT : NO_UNIT); (void)printf(" %s %s %s\n", r_idx_scan, r_idx_tup_read, r_idx_tup_fetch ); /* setting the new old value */ previous_pgstatindex->idx_scan = idx_scan; previous_pgstatindex->idx_tup_read = idx_tup_read; previous_pgstatindex->idx_tup_fetch = idx_tup_fetch; } /* cleanup */ free(r_idx_scan); free(r_idx_tup_read); free(r_idx_tup_fetch); PQclear(res); } /* * Dump all function stats. */ void print_pgstatfunction() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; const char *paramValues[1]; int nrows; int row, column; long calls = 0; float total_time = 0; float self_time = 0; char *r_calls = (char *)malloc(sizeof(char) * (9 + 1)); char *r_total_time = (char *)malloc(sizeof(char) * (10 + 1)); char *r_self_time = (char *)malloc(sizeof(char) * (10 + 1)); /* * With a filter, we assume we'll get only one row. * Without, we sum all the fields to get one row. 
*/ if (opts->filter == NULL) { snprintf(sql, sizeof(sql), "SELECT sum(calls), sum(total_time), sum(self_time) " "FROM pg_stat_user_functions " "WHERE schemaname <> 'information_schema' "); res = PQexec(conn, sql); } else { snprintf(sql, sizeof(sql), "SELECT calls, total_time, self_time " "FROM pg_stat_user_functions " "WHERE schemaname <> 'information_schema' " " AND funcname = $1"); paramValues[0] = pg_strdup(opts->filter); res = PQexecParams(conn, sql, 1, /* one param */ NULL, /* let the backend deduce param type */ paramValues, NULL, /* don't need param lengths since text */ NULL, /* default to all text params */ 0); /* ask for text results */ } /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_warning("query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); pg_log_error("query was: %s", sql); exit(EXIT_FAILURE); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { column = 0; /* getting new values */ calls = atol(PQgetvalue(res, row, column++)); total_time = atof(PQgetvalue(res, row, column++)); self_time = atof(PQgetvalue(res, row, column++)); /* printing the diff... * note that the first line will be the current value, rather than the diff */ format(r_calls, calls - previous_pgstatfunction->calls, 9, opts->human_readable ? ALL_UNIT : NO_UNIT); format_time(r_total_time, total_time - previous_pgstatfunction->total_time, 10); format_time(r_self_time, self_time - previous_pgstatfunction->self_time, 10); (void)printf(" %s %s %s\n", r_calls, r_total_time, r_self_time ); /* setting the new old value */ previous_pgstatfunction->calls = calls; previous_pgstatfunction->total_time = total_time; previous_pgstatfunction->self_time = self_time; } /* cleanup */ free(r_calls); free(r_total_time); free(r_self_time); PQclear(res); } /* * Dump all statement stats. 
*/ void print_pgstatstatement() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; const char *paramValues[1]; PGresult *res; int nrows; int row, column; long plans = 0; float total_plan_time = 0; long calls = 0; float total_exec_time = 0; long rows = 0; long shared_blks_hit = 0; long shared_blks_read = 0; long shared_blks_dirtied = 0; long shared_blks_written = 0; long local_blks_hit = 0; long local_blks_read = 0; long local_blks_dirtied = 0; long local_blks_written = 0; long temp_blks_read = 0; long temp_blks_written = 0; float shared_blk_read_time = 0; float shared_blk_write_time = 0; float local_blk_read_time = 0; float local_blk_write_time = 0; float temp_blk_read_time = 0; float temp_blk_write_time = 0; long wal_records = 0; long wal_fpi = 0; long wal_bytes = 0; char *r1 = (char *)malloc(sizeof(char) * (20 + 1)); char *r2 = (char *)malloc(sizeof(char) * (20 + 1)); char *r3 = (char *)malloc(sizeof(char) * (20 + 1)); char *r4 = (char *)malloc(sizeof(char) * (20 + 1)); char *r5 = (char *)malloc(sizeof(char) * (20 + 1)); char *r6 = (char *)malloc(sizeof(char) * (20 + 1)); if (opts->filter == NULL) { snprintf(sql, sizeof(sql), "SELECT %ssum(calls), sum(%s), sum(rows)," " sum(shared_blks_hit), sum(shared_blks_read), sum(shared_blks_dirtied), sum(shared_blks_written)," " sum(local_blks_hit), sum(local_blks_read), sum(local_blks_dirtied), sum(local_blks_written)," " sum(temp_blks_read), sum(temp_blks_written)" "%s%s%s" "%s" " FROM %s.pg_stat_statements ", backend_minimum_version(13, 0) ? "sum(plans), sum(total_plan_time), " : "", backend_minimum_version(13, 0) ? "total_exec_time" : "total_time", backend_minimum_version(17, 0) ? ", sum(shared_blk_read_time), sum(shared_blk_write_time)" : ", sum(blk_read_time), sum(blk_write_time)", backend_minimum_version(17, 0) ? ", sum(local_blk_read_time), sum(local_blk_write_time)" : "", backend_minimum_version(16, 0) ? ", sum(temp_blk_read_time), sum(temp_blk_write_time)" : "", backend_minimum_version(13, 0) ? 
", sum(wal_records), sum(wal_fpi), sum(wal_bytes)" : "", opts->namespace); res = PQexec(conn, sql); } else { snprintf(sql, sizeof(sql), "SELECT %scalls, %s, rows," " shared_blks_hit, shared_blks_read, shared_blks_dirtied, shared_blks_written," " local_blks_hit, local_blks_read, local_blks_dirtied, local_blks_written," " temp_blks_read, temp_blks_written," "%s%s%s" "%s" " FROM %s.pg_stat_statements " "WHERE queryid=$1", backend_minimum_version(13, 0) ? "plans, total_plan_time, " : "", backend_minimum_version(13, 0) ? "total_exec_time" : "total_time", backend_minimum_version(17, 0) ? ", shared_blk_read_time, shared_blk_write_time" : ", blk_read_time, blk_write_time", backend_minimum_version(17, 0) ? ", local_blk_read_time, local_blk_write_time" : "", backend_minimum_version(16, 0) ? ", temp_blk_read_time, temp_blk_write_time" : "", backend_minimum_version(13, 0) ? ", wal_records, wal_fpi, wal_bytes" : "", opts->namespace); paramValues[0] = pg_strdup(opts->filter); res = PQexecParams(conn, sql, 1, /* one param */ NULL, /* let the backend deduce param type */ paramValues, NULL, /* don't need param lengths since text */ NULL, /* default to all text params */ 0); /* ask for text results */ } /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_warning("query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); pg_log_error("query was: %s", sql); exit(EXIT_FAILURE); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { column = 0; /* getting new values */ if (backend_minimum_version(13, 0)) { plans = atol(PQgetvalue(res, row, column++)); total_plan_time = atof(PQgetvalue(res, row, column++)); } calls = atol(PQgetvalue(res, row, column++)); total_exec_time = atof(PQgetvalue(res, row, column++)); rows = atol(PQgetvalue(res, row, column++)); shared_blks_hit = 
atol(PQgetvalue(res, row, column++)); shared_blks_read = atol(PQgetvalue(res, row, column++)); shared_blks_dirtied = atol(PQgetvalue(res, row, column++)); shared_blks_written = atol(PQgetvalue(res, row, column++)); local_blks_hit = atol(PQgetvalue(res, row, column++)); local_blks_read = atol(PQgetvalue(res, row, column++)); local_blks_dirtied = atol(PQgetvalue(res, row, column++)); local_blks_written = atol(PQgetvalue(res, row, column++)); temp_blks_read = atol(PQgetvalue(res, row, column++)); temp_blks_written = atol(PQgetvalue(res, row, column++)); shared_blk_read_time = atof(PQgetvalue(res, row, column++)); shared_blk_write_time = atof(PQgetvalue(res, row, column++)); if (backend_minimum_version(17, 0)) { local_blk_read_time = atof(PQgetvalue(res, row, column++)); local_blk_write_time = atof(PQgetvalue(res, row, column++)); } if (backend_minimum_version(16, 0)) { temp_blk_read_time = atof(PQgetvalue(res, row, column++)); temp_blk_write_time = atof(PQgetvalue(res, row, column++)); } if (backend_minimum_version(13, 0)) { wal_records = atol(PQgetvalue(res, row, column++)); wal_fpi = atol(PQgetvalue(res, row, column++)); wal_bytes = atol(PQgetvalue(res, row, column++)); } /* printing the diff... * note that the first line will be the current value, rather than the diff */ if ((opts->substat == NULL || strstr(opts->substat, "plan") != NULL) && backend_minimum_version(13, 0)) { format(r1, plans - previous_pgstatstatement->plans, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format_time(r2, total_plan_time - previous_pgstatstatement->total_plan_time, 9); (void)printf(" %s %s", r1, r2); } if (opts->substat == NULL || strstr(opts->substat, "exec") != NULL) { format(r1, calls - previous_pgstatstatement->calls, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format_time(r2, total_exec_time - previous_pgstatstatement->total_exec_time, 9); format(r3, rows - previous_pgstatstatement->rows, 6, opts->human_readable ? 
ALL_UNIT : NO_UNIT); (void)printf(" %s %s %s", r1, r2, r3); } if (opts->substat == NULL || strstr(opts->substat, "shared") != NULL) { format(r1, shared_blks_hit - previous_pgstatstatement->shared_blks_hit, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r2, shared_blks_read - previous_pgstatstatement->shared_blks_read, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r3, shared_blks_dirtied - previous_pgstatstatement->shared_blks_dirtied, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r4, shared_blks_written - previous_pgstatstatement->shared_blks_written, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); (void)printf(" %s %s %s %s", r1, r2, r3, r4); } if (opts->substat == NULL || strstr(opts->substat, "local") != NULL) { format(r1, local_blks_hit - previous_pgstatstatement->local_blks_hit, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r2, local_blks_read - previous_pgstatstatement->local_blks_read, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r3, local_blks_dirtied - previous_pgstatstatement->local_blks_dirtied, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r4, local_blks_written - previous_pgstatstatement->local_blks_written, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); (void)printf(" %s %s %s %s", r1, r2, r3, r4); } if (opts->substat == NULL || strstr(opts->substat, "temp") != NULL) { format(r1, temp_blks_read - previous_pgstatstatement->temp_blks_read, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r2, temp_blks_written - previous_pgstatstatement->temp_blks_written, 6, opts->human_readable ? 
ALL_UNIT : NO_UNIT); (void)printf(" %s %s", r1, r2); } if (opts->substat == NULL || strstr(opts->substat, "time") != NULL) { if (backend_minimum_version(17, 0)) { format_time(r1, shared_blk_read_time - previous_pgstatstatement->shared_blk_read_time, 9); format_time(r2, shared_blk_write_time - previous_pgstatstatement->shared_blk_write_time, 9); format_time(r3, local_blk_read_time - previous_pgstatstatement->local_blk_read_time, 9); format_time(r4, local_blk_write_time - previous_pgstatstatement->local_blk_write_time, 9); format_time(r5, temp_blk_read_time - previous_pgstatstatement->temp_blk_read_time, 9); format_time(r6, temp_blk_write_time - previous_pgstatstatement->temp_blk_write_time, 9); (void)printf(" %s %s %s %s %s %s", r1, r2, r3, r4, r5, r6); } else if (backend_minimum_version(16, 0)) { format_time(r1, shared_blk_read_time - previous_pgstatstatement->shared_blk_read_time, 9); format_time(r2, shared_blk_write_time - previous_pgstatstatement->shared_blk_write_time, 9); format_time(r3, temp_blk_read_time - previous_pgstatstatement->temp_blk_read_time, 9); format_time(r4, temp_blk_write_time - previous_pgstatstatement->temp_blk_write_time, 9); (void)printf(" %s %s %s %s", r1, r2, r3, r4); } else if (backend_minimum_version(13, 0)) { format_time(r1, shared_blk_read_time - previous_pgstatstatement->shared_blk_read_time, 9); format_time(r2, shared_blk_write_time - previous_pgstatstatement->shared_blk_write_time, 9); (void)printf(" %s %s", r1, r2); } } if ((opts->substat == NULL || strstr(opts->substat, "wal") != NULL) && backend_minimum_version(13, 0)) { format(r1, wal_records - previous_pgstatstatement->wal_records, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r2, wal_fpi - previous_pgstatstatement->wal_fpi, 6, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r3, wal_bytes - previous_pgstatstatement->wal_bytes, 6, opts->human_readable ? 
ALL_UNIT : NO_UNIT); (void)printf(" %s %s %s", r1, r2, r3); } (void)printf("\n"); /* setting the new old value */ previous_pgstatstatement->plans = plans; previous_pgstatstatement->total_plan_time = total_plan_time; previous_pgstatstatement->calls = calls; previous_pgstatstatement->total_exec_time = total_exec_time; previous_pgstatstatement->rows = rows; previous_pgstatstatement->shared_blks_hit = shared_blks_hit; previous_pgstatstatement->shared_blks_read = shared_blks_read; previous_pgstatstatement->shared_blks_dirtied = shared_blks_dirtied; previous_pgstatstatement->shared_blks_written = shared_blks_written; previous_pgstatstatement->local_blks_hit = local_blks_hit; previous_pgstatstatement->local_blks_read = local_blks_read; previous_pgstatstatement->local_blks_dirtied = local_blks_dirtied; previous_pgstatstatement->local_blks_written = local_blks_written; previous_pgstatstatement->temp_blks_read = temp_blks_read; previous_pgstatstatement->temp_blks_written = temp_blks_written; previous_pgstatstatement->shared_blk_read_time = shared_blk_read_time; previous_pgstatstatement->shared_blk_write_time = shared_blk_write_time; previous_pgstatstatement->local_blk_read_time = local_blk_read_time; previous_pgstatstatement->local_blk_write_time = local_blk_write_time; previous_pgstatstatement->temp_blk_read_time = temp_blk_read_time; previous_pgstatstatement->temp_blk_write_time = temp_blk_write_time; previous_pgstatstatement->wal_records = wal_records; previous_pgstatstatement->wal_fpi = wal_fpi; previous_pgstatstatement->wal_bytes = wal_bytes; }; /* cleanup */ free(r1); free(r2); free(r3); free(r4); free(r5); free(r6); PQclear(res); } /* * Dump all SLRU stats. 
*/ void print_pgstatslru() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; const char *paramValues[1]; int nrows; int row, column; long blks_zeroed = 0; long blks_hit = 0; long blks_read = 0; long blks_written = 0; long blks_exists = 0; long flushes = 0; long truncates = 0; char *stats_reset; bool has_been_reset; char *r_blks_zeroed = (char *)malloc(sizeof(char) * (9 + 1)); char *r_blks_hit = (char *)malloc(sizeof(char) * (9 + 1)); char *r_blks_read = (char *)malloc(sizeof(char) * (9 + 1)); char *r_blks_written = (char *)malloc(sizeof(char) * (9 + 1)); char *r_blks_exists = (char *)malloc(sizeof(char) * (9 + 1)); char *r_flushes = (char *)malloc(sizeof(char) * (9 + 1)); char *r_truncates = (char *)malloc(sizeof(char) * (9 + 1)); /* * With a filter, we assume we'll get only one row. * Without, we sum all the fields to get one row. */ if (opts->filter == NULL) { snprintf(sql, sizeof(sql), "SELECT sum(blks_zeroed), sum(blks_hit), sum(blks_read), sum(blks_written), " "sum(blks_exists), sum(flushes), sum(truncates), " "max(stats_reset), max(stats_reset)>'%s' " "FROM pg_stat_slru ", previous_pgstatslru->stats_reset); res = PQexec(conn, sql); } else { snprintf(sql, sizeof(sql), "SELECT sum(blks_zeroed), sum(blks_hit), sum(blks_read), sum(blks_written), " "sum(blks_exists), sum(flushes), sum(truncates), " "stats_reset, stats_reset>'%s' " "FROM pg_stat_slru " "WHERE name = $1", previous_pgstatslru->stats_reset); paramValues[0] = pg_strdup(opts->filter); res = PQexecParams(conn, sql, 1, /* one param */ NULL, /* let the backend deduce param type */ paramValues, NULL, /* don't need param lengths since text */ NULL, /* default to all text params */ 0); /* ask for text results */ } /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_warning("query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); pg_log_error("query was: %s", sql); exit(EXIT_FAILURE); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump 
the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { column = 0; /* getting new values */ blks_zeroed = atol(PQgetvalue(res, row, column++)); blks_hit = atol(PQgetvalue(res, row, column++)); blks_read = atol(PQgetvalue(res, row, column++)); blks_written = atol(PQgetvalue(res, row, column++)); blks_exists = atol(PQgetvalue(res, row, column++)); flushes = atol(PQgetvalue(res, row, column++)); truncates = atol(PQgetvalue(res, row, column++)); stats_reset = PQgetvalue(res, row, column++); has_been_reset = strcmp(PQgetvalue(res, row, column++), "f") && strcmp(previous_pgstatslru->stats_reset, PGSTAT_OLDEST_STAT_RESET); if (has_been_reset) { (void)printf("pg_stat_slru has been reset!\n"); } /* printing the diff... note that the first line will be the current value, rather than the diff */ format(r_blks_zeroed, blks_zeroed - previous_pgstatslru->blks_zeroed, 9, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_blks_hit, blks_hit - previous_pgstatslru->blks_hit, 9, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_blks_read, blks_read - previous_pgstatslru->blks_read, 9, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_blks_written, blks_written - previous_pgstatslru->blks_written, 9, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_blks_exists, blks_exists - previous_pgstatslru->blks_exists, 9, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_flushes, flushes - previous_pgstatslru->flushes, 9, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_truncates, truncates - previous_pgstatslru->truncates, 9, opts->human_readable ? 
ALL_UNIT : NO_UNIT); (void)printf(" %s %s %s %s %s %s %s\n", r_blks_zeroed, r_blks_hit, r_blks_read, r_blks_written, r_blks_exists, r_flushes, r_truncates ); /* setting the new old value */ previous_pgstatslru->blks_zeroed = blks_zeroed; previous_pgstatslru->blks_hit = blks_hit; previous_pgstatslru->blks_read = blks_read; previous_pgstatslru->blks_written = blks_written; previous_pgstatslru->blks_exists = blks_exists; previous_pgstatslru->flushes = flushes; previous_pgstatslru->truncates = truncates; previous_pgstatslru->stats_reset = stats_reset; } /* cleanup */ free(r_blks_zeroed); free(r_blks_hit); free(r_blks_read); free(r_blks_written); free(r_blks_exists); free(r_flushes); free(r_truncates); PQclear(res); } /* * Dump all wal stats. */ void print_pgstatwal() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row, column; long wal_records; long wal_fpi; long wal_bytes; long wal_buffers_full; long wal_write; long wal_sync; float wal_write_time; float wal_sync_time; char *stats_reset; bool has_been_reset; char *r_wal_records = (char *)malloc(sizeof(char) * (10 + 1)); char *r_wal_fpi = (char *)malloc(sizeof(char) * (10 + 1)); char *r_wal_bytes = (char *)malloc(sizeof(char) * (10 + 1)); char *r_wal_buffers_full = (char *)malloc(sizeof(char) * (10 + 1)); char *r_wal_write = (char *)malloc(sizeof(char) * (10 + 1)); char *r_wal_sync = (char *)malloc(sizeof(char) * (10 + 1)); char *r_wal_write_time = (char *)malloc(sizeof(char) * (10 + 1)); char *r_wal_sync_time = (char *)malloc(sizeof(char) * (10 + 1)); /* grab the stats (this is the only stats on one line) */ snprintf(sql, sizeof(sql), "SELECT wal_records, wal_fpi, wal_bytes, wal_buffers_full, " "wal_write, wal_sync, wal_write_time, wal_sync_time, " "stats_reset, stats_reset>'%s' " "FROM pg_stat_wal ", previous_pgstatwal->stats_reset); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_warning("query failed: %s", 
PQerrorMessage(conn)); PQclear(res); PQfinish(conn); pg_log_error("query was: %s", sql); exit(EXIT_FAILURE); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { column = 0; /* getting new values */ wal_records = atol(PQgetvalue(res, row, column++)); wal_fpi = atol(PQgetvalue(res, row, column++)); wal_bytes = atol(PQgetvalue(res, row, column++)); wal_buffers_full = atol(PQgetvalue(res, row, column++)); wal_write = atol(PQgetvalue(res, row, column++)); wal_sync = atol(PQgetvalue(res, row, column++)); wal_write_time = atof(PQgetvalue(res, row, column++)); wal_sync_time = atof(PQgetvalue(res, row, column++)); stats_reset = PQgetvalue(res, row, column++); has_been_reset = strcmp(PQgetvalue(res, row, column++), "f") && strcmp(previous_pgstatwal->stats_reset, PGSTAT_OLDEST_STAT_RESET); if (has_been_reset) { (void)printf("pg_stat_wal has been reset!\n"); } /* printing the diff... * note that the first line will be the current value, rather than the diff */ format(r_wal_records, wal_records - previous_pgstatwal->wal_records, 10, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_wal_fpi, wal_fpi - previous_pgstatwal->wal_fpi, 10, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_wal_bytes, wal_bytes - previous_pgstatwal->wal_bytes, 10, opts->human_readable ? SIZE_UNIT : NO_UNIT); format(r_wal_buffers_full, wal_buffers_full - previous_pgstatwal->wal_buffers_full, 10, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_wal_write, wal_write - previous_pgstatwal->wal_write, 10, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_wal_sync, wal_sync - previous_pgstatwal->wal_sync, 10, opts->human_readable ? 
ALL_UNIT : NO_UNIT); format_time(r_wal_write_time, wal_write_time - previous_pgstatwal->wal_write_time, 10); format_time(r_wal_sync_time, wal_sync_time - previous_pgstatwal->wal_sync_time, 10); (void)printf(" %s %s %s %s %s %s %s %s\n", r_wal_records, r_wal_fpi, r_wal_bytes, r_wal_buffers_full, r_wal_write, r_wal_sync, r_wal_write_time, r_wal_sync_time ); /* setting the new old value */ previous_pgstatwal->wal_records = wal_records; previous_pgstatwal->wal_fpi = wal_fpi; previous_pgstatwal->wal_bytes = wal_bytes; previous_pgstatwal->wal_buffers_full = wal_buffers_full; previous_pgstatwal->wal_write = wal_write; previous_pgstatwal->wal_sync = wal_sync; previous_pgstatwal->wal_write_time = wal_write_time; previous_pgstatwal->wal_sync_time = wal_sync_time; previous_pgstatwal->stats_reset = stats_reset; } /* cleanup */ free(r_wal_records); free(r_wal_fpi); free(r_wal_bytes); free(r_wal_buffers_full); free(r_wal_write); free(r_wal_sync); free(r_wal_write_time); free(r_wal_sync_time); PQclear(res); } /* * Dump base backup progress. 
*/ void print_pgstatprogressbasebackup() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row; snprintf(sql, sizeof(sql), "SELECT pid," " phase," " pg_size_pretty(backup_streamed)," " pg_size_pretty(backup_total)," " CASE WHEN backup_total>0" " THEN trunc(backup_streamed::numeric*100/backup_total,2)::text" " ELSE 'N/A' END," " CASE WHEN tablespaces_total>0" " THEN trunc(tablespaces_streamed::numeric*100/tablespaces_total,2)::text" " ELSE 'N/A' END," " (now()-query_start)::time(0) " "FROM pg_stat_progress_basebackup " "JOIN pg_stat_activity USING (pid) " "ORDER BY pid"); res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_warning("query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); pg_log_error("query was: %s", sql); exit(EXIT_FAILURE); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ for (row = 0; row < nrows; row++) { /* printing the value... */ (void)printf(" %-10s %-28s %-10s %-10s %6s %6s %s\n", PQgetvalue(res, row, 0), PQgetvalue(res, row, 1), PQgetvalue(res, row, 2), PQgetvalue(res, row, 3), PQgetvalue(res, row, 4), PQgetvalue(res, row, 5), PQgetvalue(res, row, 6) ); }; /* cleanup */ PQclear(res); } /* * Dump analyze progress. 
 *
 * Lists every backend currently running ANALYZE: target database and
 * table, table size, phase, and completion percentages for the block
 * sample, extended statistics, and child tables (N/A when the total is
 * zero), plus elapsed time since the query started.
 */
void print_pgstatprogressanalyze()
{
	char sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult *res;
	int nrows;
	int row;

	snprintf(sql, sizeof(sql),
		"SELECT s.datname, relname,"
		" pg_size_pretty(pg_table_size(relid)),"
		" phase,"
		" CASE WHEN sample_blks_total>0"
		" THEN trunc(sample_blks_scanned::numeric*100/sample_blks_total,2)::text"
		" ELSE 'N/A' END,"
		" CASE WHEN ext_stats_total>0"
		" THEN trunc(ext_stats_computed::numeric*100/ext_stats_total,2)::text"
		" ELSE 'N/A' END,"
		" CASE WHEN child_tables_total>0"
		" THEN trunc(child_tables_done::numeric*100/child_tables_total,2)::text"
		" ELSE 'N/A' END,"
		" (now()-query_start)::time(0) "
		"FROM pg_stat_progress_analyze s "
		"JOIN pg_stat_activity USING (pid) "
		"LEFT JOIN pg_class c ON c.oid=s.relid "
		"ORDER BY pid");

	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		pg_log_warning("query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		pg_log_error("query was: %s", sql);
		exit(EXIT_FAILURE);
	}

	/* get the number of fields */
	nrows = PQntuples(res);

	/* for each row, dump the information */
	for (row = 0; row < nrows; row++)
	{
		/* printing the value... */
		(void)printf(" %-16s %-20s %10s %-24s %6s %6s %6s %s\n",
			PQgetvalue(res, row, 0),
			PQgetvalue(res, row, 1),
			PQgetvalue(res, row, 2),
			PQgetvalue(res, row, 3),
			PQgetvalue(res, row, 4),
			PQgetvalue(res, row, 5),
			PQgetvalue(res, row, 6),
			PQgetvalue(res, row, 7)
			);
	};

	/* cleanup */
	PQclear(res);
}

/*
 * Dump cluster progress.
 *
 * Lists every backend currently running CLUSTER or VACUUM FULL: target
 * database, table and clustering index, phase, heap tuples scanned and
 * written, heap-block completion percentage (N/A when the total is zero),
 * number of indexes rebuilt, and elapsed time since the query started.
 */
void print_pgstatprogresscluster()
{
	char sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult *res;
	int nrows;
	int row;

	snprintf(sql, sizeof(sql),
		"SELECT s.datname, t.relname, i.relname,"
		" phase, heap_tuples_scanned, heap_tuples_written,"
		" CASE WHEN heap_blks_total=0 THEN 'N/A' ELSE trunc(heap_blks_scanned::numeric*100/heap_blks_total,2)::text END,"
		" index_rebuild_count,"
		" (now()-query_start)::time(0) "
		"FROM pg_stat_progress_cluster s "
		"JOIN pg_stat_activity USING (pid) "
		"LEFT JOIN pg_class t ON t.oid=s.relid "
		"LEFT JOIN pg_class i ON i.oid=s.cluster_index_relid "
		"ORDER BY pid");

	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		pg_log_warning("query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		pg_log_error("query was: %s", sql);
		exit(EXIT_FAILURE);
	}

	/* get the number of fields */
	nrows = PQntuples(res);

	/* for each row, dump the information */
	/* this is stupid, a simple if would do the trick, but it will help for other cases */
	for (row = 0; row < nrows; row++)
	{
		/* printing the value... */
		(void)printf(" %-16s %-20s %-20s %-46s %12ld %12ld %5s %10ld %s\n",
			PQgetvalue(res, row, 0),
			PQgetvalue(res, row, 1),
			PQgetvalue(res, row, 2),
			PQgetvalue(res, row, 3),
			atol(PQgetvalue(res, row, 4)),
			atol(PQgetvalue(res, row, 5)),
			PQgetvalue(res, row, 6),
			atol(PQgetvalue(res, row, 7)),
			PQgetvalue(res, row, 8)
			);
	};

	/* cleanup */
	PQclear(res);
}

/*
 * Dump copy progress.
*/ void print_pgstatprogresscopy() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row; snprintf(sql, sizeof(sql), "SELECT pc.datname, t.relname," " command, type," " bytes_processed, bytes_total, tuples_processed, tuples_excluded," " (now()-query_start)::time(0) " "FROM pg_stat_progress_copy pc " "JOIN pg_stat_activity USING (pid) " "LEFT JOIN pg_class t ON t.oid=pc.relid " "ORDER BY pid"); res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_warning("query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); pg_log_error("query was: %s", sql); exit(EXIT_FAILURE); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { /* printing the value... */ (void)printf(" %-16s %-20s %-23s %-20s %10ld %10ld %10ld %10ld %s\n", PQgetvalue(res, row, 0), PQgetvalue(res, row, 1), PQgetvalue(res, row, 2), PQgetvalue(res, row, 3), atol(PQgetvalue(res, row, 4)), atol(PQgetvalue(res, row, 5)), atol(PQgetvalue(res, row, 6)), atol(PQgetvalue(res, row, 7)), PQgetvalue(res, row, 8) ); }; /* cleanup */ PQclear(res); } /* * Dump index creation progress. 
 */
/*
 * Print one line per backend currently running CREATE INDEX [CONCURRENTLY]:
 * database, table, index, phase, percentage of lockers/blocks/tuples/
 * partitions processed, and the elapsed time since the query started.
 */
void print_pgstatprogresscreateindex()
{
	char		sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult   *res;
	int			nrows;
	int			row;

	/* percentages are returned as text so "N/A" can be shown when the
	 * corresponding total is zero */
	snprintf(sql, sizeof(sql),
			 "SELECT s.datname, t.relname, i.relname,"
			 " phase,"
			 " CASE WHEN lockers_total=0 THEN 'N/A' ELSE trunc(lockers_done::numeric*100/lockers_total,2)::text END,"
			 " CASE WHEN blocks_total=0 THEN 'N/A' ELSE trunc(blocks_done::numeric*100/blocks_total,2)::text END,"
			 " CASE WHEN tuples_total=0 THEN 'N/A' ELSE trunc(tuples_done::numeric*100/tuples_total,2)::text END,"
			 " CASE WHEN partitions_total=0 THEN 'N/A' ELSE trunc(partitions_done::numeric*100/partitions_total,2)::text END, "
			 " (now()-query_start)::time(0) "
			 "FROM pg_stat_progress_create_index s "
			 "JOIN pg_stat_activity USING (pid) "
			 "LEFT JOIN pg_class t ON t.oid=s.relid "
			 "LEFT JOIN pg_class i ON i.oid=s.index_relid "
			 "ORDER BY pid");

	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		pg_log_warning("query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		pg_log_error("query was: %s", sql);
		exit(EXIT_FAILURE);
	}

	/* get the number of rows */
	nrows = PQntuples(res);

	/* one output line per in-progress CREATE INDEX */
	for (row = 0; row < nrows; row++)
	{
		/* printing the value... */
		(void)printf(" %-16s %-20s %-20s %-46s %5s %5s %5s %5s %s\n",
			PQgetvalue(res, row, 0),
			PQgetvalue(res, row, 1),
			PQgetvalue(res, row, 2),
			PQgetvalue(res, row, 3),
			PQgetvalue(res, row, 4),
			PQgetvalue(res, row, 5),
			PQgetvalue(res, row, 6),
			PQgetvalue(res, row, 7),
			PQgetvalue(res, row, 8)
			);
	};

	/* cleanup */
	PQclear(res);
}

/*
 * Dump vacuum progress.
 */
/*
 * Print one line per backend currently running VACUUM: database, relation
 * and its size, phase, percentage of heap blocks scanned and vacuumed,
 * index vacuum count, percentage of the dead-tuple storage used, and the
 * elapsed time since the query started.
 *
 * PostgreSQL 17 replaced the tuple-count columns (num_dead_tuples /
 * max_dead_tuples) with byte-based ones (dead_tuple_bytes /
 * max_dead_tuple_bytes), hence the three %s substitutions below.
 */
void print_pgstatprogressvacuum()
{
	char		sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult   *res;
	int			nrows;
	int			row;

	snprintf(sql, sizeof(sql),
			 "SELECT s.datname, c.relname,"
			 " pg_size_pretty(pg_table_size(s.relid)),"
			 " s.phase,"
			 " CASE WHEN s.heap_blks_total=0 THEN 'N/A' ELSE trunc(s.heap_blks_scanned::numeric*100/s.heap_blks_total,2)::text END,"
			 " CASE WHEN s.heap_blks_total=0 THEN 'N/A' ELSE trunc(s.heap_blks_vacuumed::numeric*100/s.heap_blks_total,2)::text END,"
			 " s.index_vacuum_count,"
			 " CASE WHEN s.%s=0 THEN 'N/A' ELSE trunc(s.%s::numeric*100/s.%s,2)::text END,"
			 " (now()-a.query_start)::time(0) "
			 "FROM pg_stat_progress_vacuum s "
			 "JOIN pg_stat_activity a ON s.pid=a.pid "
			 "LEFT JOIN pg_class c ON c.oid=s.relid "
			 "ORDER BY s.pid",
			 backend_minimum_version(17, 0) ? "max_dead_tuple_bytes" : "max_dead_tuples",
			 backend_minimum_version(17, 0) ? "dead_tuple_bytes" : "num_dead_tuples",
			 backend_minimum_version(17, 0) ? "max_dead_tuple_bytes" : "max_dead_tuples");

	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		pg_log_warning("query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		pg_log_error("query was: %s", sql);
		exit(EXIT_FAILURE);
	}

	/* get the number of rows */
	nrows = PQntuples(res);

	/* one output line per in-progress VACUUM */
	for (row = 0; row < nrows; row++)
	{
		/* printing the value... */
		(void)printf(" %-16s %-20s %10s %-24s %5s %5s %5ld %5s %s\n",
			PQgetvalue(res, row, 0),
			PQgetvalue(res, row, 1),
			PQgetvalue(res, row, 2),
			PQgetvalue(res, row, 3),
			PQgetvalue(res, row, 4),
			PQgetvalue(res, row, 5),
			atol(PQgetvalue(res, row, 6)),
			PQgetvalue(res, row, 7),
			PQgetvalue(res, row, 8)
			);
	};

	/* cleanup */
	PQclear(res);
}

/*
 * Dump all buffercache stats.
*/ void print_buffercache() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row, column; long usedblocks = 0; long usedblocks_pct = 0; long dirtyblocks = 0; long dirtyblocks_pct = 0; char *r_usedblocks = (char *)malloc(sizeof(char) * (5 + 1)); char *r_usedblocks_pct = (char *)malloc(sizeof(char) * (5 + 1)); char *r_dirtyblocks = (char *)malloc(sizeof(char) * (5 + 1)); char *r_dirtyblocks_pct = (char *)malloc(sizeof(char) * (5 + 1)); snprintf(sql, sizeof(sql), "SELECT count(*) FILTER (WHERE relfilenode IS NOT NULL), " "100. * count(*) FILTER (WHERE relfilenode IS NOT NULL) / count(*), " "count(*) FILTER (WHERE isdirty), " "100. * count(*) FILTER (WHERE isdirty) / count(*) " "FROM %s.pg_buffercache ", opts->namespace); res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_warning("query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); pg_log_error("query was: %s", sql); exit(EXIT_FAILURE); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { column = 0; usedblocks = atol(PQgetvalue(res, row, column++)); usedblocks_pct = atol(PQgetvalue(res, row, column++)); dirtyblocks = atol(PQgetvalue(res, row, column++)); dirtyblocks_pct = atol(PQgetvalue(res, row, column++)); /* printing the actual values for once */ format(r_usedblocks, usedblocks, 7, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_usedblocks_pct, usedblocks_pct, 5, NO_UNIT); format(r_dirtyblocks, dirtyblocks, 7, opts->human_readable ? 
ALL_UNIT : NO_UNIT); format(r_dirtyblocks_pct, dirtyblocks_pct, 5, NO_UNIT); (void)printf(" %s %s %s %s\n", r_usedblocks, r_usedblocks_pct, r_dirtyblocks, r_dirtyblocks_pct); } /* cleanup */ free(r_usedblocks); free(r_usedblocks_pct); free(r_dirtyblocks); free(r_dirtyblocks_pct); PQclear(res); } /* * Dump all xlog writes stats. */ void print_xlogstats() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; char *xlogfilename; char *currentlocation; long locationdiff; char *r_locationdiff = (char *)malloc(sizeof(char) * (12 + 1)); if (backend_minimum_version(10, 0)) { snprintf(sql, sizeof(sql), "SELECT " " pg_walfile_name(pg_current_wal_lsn()), " " pg_current_wal_lsn(), " " pg_wal_lsn_diff(pg_current_wal_lsn(), '0/0')"); } else { snprintf(sql, sizeof(sql), "SELECT " " pg_xlogfile_name(pg_current_xlog_location()), " " pg_current_xlog_location(), " " pg_xlog_location_diff(pg_current_xlog_location(), '0/0')"); } res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_warning("query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); pg_log_error("query was: %s", sql); exit(EXIT_FAILURE); } xlogfilename = pg_strdup(PQgetvalue(res, 0, 0)); currentlocation = pg_strdup(PQgetvalue(res, 0, 1)); locationdiff = atol(PQgetvalue(res, 0, 2)); /* printing the actual values for once */ format(r_locationdiff, locationdiff - previous_xlogstats->locationdiff, 12, opts->human_readable ? SIZE_UNIT : NO_UNIT); (void)printf(" %s %s %s\n", xlogfilename, currentlocation, r_locationdiff); /* setting the new old value */ previous_xlogstats->location = pg_strdup(currentlocation); previous_xlogstats->locationdiff = locationdiff; /* cleanup */ free(r_locationdiff); PQclear(res); } /* * Dump dead live tuples stats. 
*/ void print_deadlivestats() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; long live; long dead; char *r_live = (char *)malloc(sizeof(char) * (10 + 1)); char *r_dead = (char *)malloc(sizeof(char) * (10 + 1)); snprintf(sql, sizeof(sql), "SELECT sum(n_live_tup), sum(n_dead_tup) FROM pg_stat_all_tables"); res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_warning("query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); pg_log_error("query was: %s", sql); exit(EXIT_FAILURE); } live = atol(PQgetvalue(res, 0, 0)); dead = atol(PQgetvalue(res, 0, 1)); /* printing the actual values for once */ format(r_live, live, 10, opts->human_readable ? ALL_UNIT : NO_UNIT); format(r_dead, dead, 10, opts->human_readable ? ALL_UNIT : NO_UNIT); (void)printf(" %s %s %.2f\n", r_live, r_dead, dead+live == 0 ? 0 : 100.*dead/((dead+live))); /* setting the new old value */ previous_deadlivestats->live = live; previous_deadlivestats->dead = dead; /* cleanup */ free(r_live); free(r_dead); PQclear(res); } /* * Dump all repslots informations */ void print_repslotsstats() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; char *xlogfilename; char *currentlocation; long locationdiff; char *r_locationdiff = (char *)malloc(sizeof(char) * (12 + 1)); snprintf(sql, sizeof(sql), "SELECT " " pg_walfile_name(restart_lsn), " " restart_lsn, " " pg_wal_lsn_diff(restart_lsn, '0/0')" "FROM pg_replication_slots " "WHERE slot_name = '%s'", opts->filter); res = PQexec(conn, sql); if (!res || PQntuples(res) == 0) { PQclear(res); PQfinish(conn); pg_log_error("No results, meaning no replication slot"); exit(EXIT_FAILURE); } /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_warning("query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); pg_log_error("query was: %s", sql); exit(EXIT_FAILURE); } xlogfilename = pg_strdup(PQgetvalue(res, 0, 0)); currentlocation = pg_strdup(PQgetvalue(res, 0, 1)); 
locationdiff = atol(PQgetvalue(res, 0, 2)); /* printing the actual values for once */ format(r_locationdiff, locationdiff - previous_xlogstats->locationdiff, 12, opts->human_readable ? SIZE_UNIT : NO_UNIT); (void)printf(" %s %s %s\n", xlogfilename, currentlocation, r_locationdiff); /* setting the new old value */ previous_repslots->restartlsn = pg_strdup(currentlocation); previous_repslots->restartlsndiff = locationdiff; /* cleanup */ free(r_locationdiff); PQclear(res); } /* * Dump all temporary files stats. */ void print_tempfilestats() { char sql[2*PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; long size = 0; long count = 0; int nrows; int row, column; char *r_size = (char *)malloc(sizeof(char) * (10 + 1)); char *r_count = (char *)malloc(sizeof(char) * (10 + 1)); if (backend_minimum_version(9, 3)) { snprintf(sql, sizeof(sql), "SELECT unnest(regexp_matches(agg.tmpfile, 'pgsql_tmp([0-9]*)')) AS pid, " " SUM((pg_stat_file(agg.dir||'/'||agg.tmpfile)).size), " " count(*) " "FROM " " (SELECT ls.oid, ls.spcname, " " ls.dir||'/'||ls.sub AS dir, CASE gs.i WHEN 1 THEN '' ELSE pglsdir END AS tmpfile " " FROM " " (SELECT sr.oid, sr.spcname, " " 'pg_tblspc/'||sr.oid||'/'||sr.spc_root AS dir, " " pg_ls_dir('pg_tblspc/'||sr.oid||'/'||sr.spc_root) AS sub " " FROM (SELECT spc.oid, spc.spcname, " " pg_ls_dir('pg_tblspc/'||spc.oid) AS spc_root, " " trim(trailing E'\n ' FROM pg_read_file('PG_VERSION')) as v " " FROM (SELECT oid, spcname FROM pg_tablespace WHERE spcname !~ '^pg_') AS spc " " ) sr " " WHERE sr.spc_root ~ ('^PG_'||sr.v) " " UNION ALL " " SELECT 0, 'pg_default', " " 'base' AS dir, " " 'pgsql_tmp' AS sub " " FROM pg_ls_dir('base') AS l " " WHERE l='pgsql_tmp' " " ) AS ls, " " (SELECT generate_series(1,2) AS i) AS gs, " " LATERAL pg_ls_dir(dir||'/'||ls.sub) pglsdir " " WHERE ls.sub = 'pgsql_tmp') agg " "GROUP BY 1"); } else { snprintf(sql, sizeof(sql), "SELECT unnest(regexp_matches(agg.tmpfile, 'pgsql_tmp([0-9]*)')) AS pid, " " 
SUM((pg_stat_file(agg.dir||'/'||agg.tmpfile)).size), " " count(*) " "FROM " " (SELECT ls.oid, ls.spcname, " " ls.dir||'/'||ls.sub AS dir, CASE gs.i WHEN 1 THEN '' ELSE pg_ls_dir(dir||'/'||ls.sub) END AS tmpfile " " FROM " " (SELECT sr.oid, sr.spcname, " " 'pg_tblspc/'||sr.oid||'/'||sr.spc_root AS dir, " " pg_ls_dir('pg_tblspc/'||sr.oid||'/'||sr.spc_root) AS sub " " FROM (SELECT spc.oid, spc.spcname, " " pg_ls_dir('pg_tblspc/'||spc.oid) AS spc_root, " " trim(trailing E'\n ' FROM pg_read_file('PG_VERSION')) as v " " FROM (SELECT oid, spcname FROM pg_tablespace WHERE spcname !~ '^pg_') AS spc " " ) sr " " WHERE sr.spc_root ~ ('^PG_'||sr.v) " " UNION ALL " " SELECT 0, 'pg_default', " " 'base' AS dir, " " 'pgsql_tmp' AS sub " " FROM pg_ls_dir('base') AS l " " WHERE l='pgsql_tmp' " " ) AS ls, " " (SELECT generate_series(1,2) AS i) AS gs " " WHERE ls.sub = 'pgsql_tmp') agg " "GROUP BY 1"); } res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_warning("query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); pg_log_error("query was: %s", sql); exit(EXIT_FAILURE); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ for (row = 0; row < nrows; row++) { column = 1; /* getting new values */ size += atol(PQgetvalue(res, row, column++)); count += atol(PQgetvalue(res, row, column++)); } /* printing the diff... */ format(r_size, size, 10, opts->human_readable ? SIZE_UNIT : NO_UNIT); format(r_count, count, 10, opts->human_readable ? ALL_UNIT : NO_UNIT); (void)printf(" %s %s\n", r_size, r_count); /* cleanup */ free(r_size); free(r_count); PQclear(res); } /* * Dump all wait event stats. 
 */
/*
 * Print, for one sample of pg_stat_activity, the number of backends waiting
 * on each wait-event type, plus the number of running (non-waiting)
 * backends and the total backend count.
 */
void print_pgstatwaitevent()
{
	char		sql[2*PGSTAT_DEFAULT_STRING_SIZE];
	PGresult   *res;
	int			nrows;
	int			row;
	char	   *r_lwlock = (char *)malloc(sizeof(char) * (10 + 1));
	char	   *r_lock = (char *)malloc(sizeof(char) * (10 + 1));
	char	   *r_bufferpin = (char *)malloc(sizeof(char) * (10 + 1));
	char	   *r_activity = (char *)malloc(sizeof(char) * (10 + 1));
	char	   *r_client = (char *)malloc(sizeof(char) * (10 + 1));
	char	   *r_extension = (char *)malloc(sizeof(char) * (10 + 1));
	char	   *r_ipc = (char *)malloc(sizeof(char) * (10 + 1));
	char	   *r_timeout = (char *)malloc(sizeof(char) * (10 + 1));
	char	   *r_io = (char *)malloc(sizeof(char) * (10 + 1));
	char	   *r_running = (char *)malloc(sizeof(char) * (10 + 1));
	char	   *r_all = (char *)malloc(sizeof(char) * (10 + 1));

	/* a NULL wait_event_type means the backend is actually running */
	snprintf(sql, sizeof(sql),
			 "SELECT "
			 " count(*) FILTER (WHERE wait_event_type='LWLock') AS LWLock, "
			 " count(*) FILTER (WHERE wait_event_type='Lock') AS Lock, "
			 " count(*) FILTER (WHERE wait_event_type='BufferPin') AS BufferPin, "
			 " count(*) FILTER (WHERE wait_event_type='Activity') AS Activity, "
			 " count(*) FILTER (WHERE wait_event_type='Client') AS Client, "
			 " count(*) FILTER (WHERE wait_event_type='Extension') AS Extension, "
			 " count(*) FILTER (WHERE wait_event_type='IPC') AS IPC, "
			 " count(*) FILTER (WHERE wait_event_type='Timeout') AS Timeout, "
			 " count(*) FILTER (WHERE wait_event_type='IO') AS IO, "
			 " count(*) FILTER (WHERE wait_event_type IS NULL) AS Running, "
			 " count(*) AS All "
			 "FROM pg_stat_activity;");

	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		pg_log_warning("query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		pg_log_error("query was: %s", sql);
		exit(EXIT_FAILURE);
	}

	/* get the number of rows (a single aggregate row is expected) */
	nrows = PQntuples(res);

	for (row = 0; row < nrows; row++)
	{
		/* printing new values */
		format(r_lwlock, atoi(PQgetvalue(res, row, 0)), 10, opts->human_readable ? ALL_UNIT : NO_UNIT);
		format(r_lock, atoi(PQgetvalue(res, row, 1)), 10, opts->human_readable ? ALL_UNIT : NO_UNIT);
		format(r_bufferpin, atoi(PQgetvalue(res, row, 2)), 10, opts->human_readable ? ALL_UNIT : NO_UNIT);
		format(r_activity, atoi(PQgetvalue(res, row, 3)), 10, opts->human_readable ? ALL_UNIT : NO_UNIT);
		format(r_client, atoi(PQgetvalue(res, row, 4)), 10, opts->human_readable ? ALL_UNIT : NO_UNIT);
		format(r_extension, atoi(PQgetvalue(res, row, 5)), 10, opts->human_readable ? ALL_UNIT : NO_UNIT);
		format(r_ipc, atoi(PQgetvalue(res, row, 6)), 10, opts->human_readable ? ALL_UNIT : NO_UNIT);
		format(r_timeout, atoi(PQgetvalue(res, row, 7)), 10, opts->human_readable ? ALL_UNIT : NO_UNIT);
		format(r_io, atoi(PQgetvalue(res, row, 8)), 10, opts->human_readable ? ALL_UNIT : NO_UNIT);
		format(r_running, atoi(PQgetvalue(res, row, 9)), 10, opts->human_readable ? ALL_UNIT : NO_UNIT);
		format(r_all, atoi(PQgetvalue(res, row, 10)), 10, opts->human_readable ? ALL_UNIT : NO_UNIT);
		(void)printf(" %s %s %s %s %s %s %s %s %s %s %s\n",
			r_lwlock,
			r_lock,
			r_bufferpin,
			r_activity,
			r_client,
			r_extension,
			r_ipc,
			r_timeout,
			r_io,
			r_running,
			r_all
			);
	}

	/* cleanup */
	free(r_lwlock);
	free(r_lock);
	free(r_bufferpin);
	free(r_activity);
	free(r_client);
	free(r_extension);
	free(r_ipc);
	free(r_timeout);
	free(r_io);
	free(r_running);
	free(r_all);
	PQclear(res);
}

/*
 * Dump all pgBouncer pools stats.
 */
/*
 * Print the pgBouncer pool counters (SHOW pools), summed over every
 * database/user pool.  These are current gauge values, not deltas.
 */
void print_pgbouncerpools()
{
	char		sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult   *res;
	int			nrows;
	int			row, column;
	long		cl_active = 0;
	long		cl_waiting = 0;
	long		sv_active = 0;
	long		sv_idle = 0;
	long		sv_used = 0;
	long		sv_tested = 0;
	long		sv_login = 0;
	long		maxwait = 0;

	/*
	 * We cannot use a filter now, we need to get all rows.
	 */
	snprintf(sql, sizeof(sql), "SHOW pools");

	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		pg_log_warning("query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		pg_log_error("query was: %s", sql);
		exit(EXIT_FAILURE);
	}

	/* get the number of rows (one per pool) */
	nrows = PQntuples(res);

	/* sum each counter over every pool */
	for (row = 0; row < nrows; row++)
	{
		/* we don't use the first two columns (database and user names) */
		column = 2;

		/* getting new values */
		cl_active += atol(PQgetvalue(res, row, column++));
		cl_waiting += atol(PQgetvalue(res, row, column++));
		sv_active += atol(PQgetvalue(res, row, column++));
		sv_idle += atol(PQgetvalue(res, row, column++));
		sv_used += atol(PQgetvalue(res, row, column++));
		sv_tested += atol(PQgetvalue(res, row, column++));
		sv_login += atol(PQgetvalue(res, row, column++));
		maxwait += atol(PQgetvalue(res, row, column++));
	}

	/* printing the current values (these are gauges, no diff is taken) */
	(void)printf(" %6ld %6ld %6ld %6ld %6ld %6ld %6ld %6ld\n",
		cl_active,
		cl_waiting,
		sv_active,
		sv_idle,
		sv_used,
		sv_tested,
		sv_login,
		maxwait
		);

	/* cleanup */
	PQclear(res);
}

/*
 * Dump all pgBouncer stats.
 */
/*
 * Print the pgBouncer traffic counters (SHOW stats), summed over every
 * database and printed as deltas against the previous sample.
 */
void print_pgbouncerstats()
{
	char		sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult   *res;
	int			nrows;
	int			row, column;
	long		total_request = 0;
	long		total_received = 0;
	long		total_sent = 0;
	long		total_query_time = 0;

	/*
	 * We cannot use a filter now, we need to get all rows.
	 */
	snprintf(sql, sizeof(sql), "SHOW stats");

	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		pg_log_warning("query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		pg_log_error("query was: %s", sql);
		exit(EXIT_FAILURE);
	}

	/* get the number of rows (one per database) */
	nrows = PQntuples(res);

	/* sum each counter over every database */
	for (row = 0; row < nrows; row++)
	{
		/* we don't use the first column (database name) */
		column = 1;

		/* getting new values */
		total_request += atol(PQgetvalue(res, row, column++));
		total_received += atol(PQgetvalue(res, row, column++));
		total_sent += atol(PQgetvalue(res, row, column++));
		total_query_time += atol(PQgetvalue(res, row, column++));
	}

	/* printing the diff...
	 * note that the first line will be the current value, rather than the diff */
	(void)printf(" %6ld %6ld %6ld %6ld\n",
		total_request - previous_pgbouncerstats->total_request,
		total_received - previous_pgbouncerstats->total_received,
		total_sent - previous_pgbouncerstats->total_sent,
		total_query_time - previous_pgbouncerstats->total_query_time
		);

	/* setting the new old value */
	previous_pgbouncerstats->total_request = total_request;
	previous_pgbouncerstats->total_received = total_received;
	previous_pgbouncerstats->total_sent = total_sent;
	previous_pgbouncerstats->total_query_time = total_query_time;

	/* cleanup */
	PQclear(res);
}

/*
 * Fetch PostgreSQL major and minor numbers
 */
/*
 * Query version() and store the server's major and minor numbers in
 * opts->major / opts->minor.
 */
void fetch_version()
{
	char		sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult   *res;

	/* get the cluster version */
	snprintf(sql, sizeof(sql), "SELECT version()");

	/* make the call */
	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		pg_log_warning("query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		pg_log_error("query was: %s", sql);
		exit(EXIT_FAILURE);
	}

	/* get the only row, and parse it to get major and minor numbers */
	/* NOTE(review): on a devel/beta server the string may lack a ".minor"
	 * part, leaving opts->minor untouched — confirm callers tolerate it */
	sscanf(PQgetvalue(res, 0, 0), "%*s %d.%d", &(opts->major), &(opts->minor));

	/* print version */
	if (opts->verbose)
		printf("Detected release: %d.%d\n", opts->major, opts->minor);

	/* cleanup */
	PQclear(res);
}

/*
 * Fetch setting value
 */
/*
 * Return the value of the GUC named "name", as a freshly allocated string
 * the caller owns (allocated with pg_strdup).
 */
char *fetch_setting(char *name)
{
	char		sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult   *res;
	char	   *setting;

	/* get the setting from pg_settings; "name" is trusted (internal use) */
	snprintf(sql, sizeof(sql), "SELECT setting FROM pg_settings WHERE name='%s'", name);

	/* make the call */
	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		pg_log_warning("query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		pg_log_error("query was: %s", sql);
		exit(EXIT_FAILURE);
	}

	/* get the only row as the setting value */
	setting = pg_strdup(PQgetvalue(res, 0, 0));

	/* print the value if verbose */
	if (opts->verbose)
		printf("%s is set to %s\n", name, setting);

	/* cleanup */
	PQclear(res);

	return setting;
}

/*
 * Fetch pg_buffercache namespace
 */
/*
 * Find the schema pg_buffercache is installed in and store it in
 * opts->namespace.  If the extension is not installed, opts->namespace is
 * left untouched.
 */
void fetch_pgbuffercache_namespace()
{
	char		sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult   *res;

	/* 9.1+ has real extensions; before that, look for the function name */
	if (backend_minimum_version(9, 1))
	{
		snprintf(sql, sizeof(sql),
				 "SELECT nspname FROM pg_extension e "
				 "JOIN pg_namespace n ON e.extnamespace=n.oid "
				 "WHERE extname='pg_buffercache'");
	}
	else
	{
		snprintf(sql, sizeof(sql),
				 "SELECT nspname FROM pg_proc p "
				 "JOIN pg_namespace n ON p.pronamespace=n.oid "
				 "WHERE proname='pg_buffercache'");
	}

	/* make the call */
	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		pg_log_warning("query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		pg_log_error("query was: %s", sql);
		exit(EXIT_FAILURE);
	}

	if (PQntuples(res) > 0)
	{
		/* extension found: remember its schema */
		opts->namespace = pg_strdup(PQgetvalue(res, 0, 0));

		/* print the schema if verbose */
		if (opts->verbose)
			printf("pg_buffercache namespace: %s\n", opts->namespace);
	}

	/* cleanup */
	PQclear(res);
}

/*
 *
Fetch pg_stat_statement namespace
 */
/*
 * Find the schema pg_stat_statements is installed in and store it in
 * opts->namespace.  If the extension is not installed, opts->namespace is
 * left untouched.
 */
void fetch_pgstatstatements_namespace()
{
	char		sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult   *res;

	/* 9.1+ has real extensions; before that, look for the function name */
	if (backend_minimum_version(9, 1))
	{
		snprintf(sql, sizeof(sql),
				 "SELECT nspname FROM pg_extension e "
				 "JOIN pg_namespace n ON e.extnamespace=n.oid "
				 "WHERE extname='pg_stat_statements'");
	}
	else
	{
		snprintf(sql, sizeof(sql),
				 "SELECT nspname FROM pg_proc p "
				 "JOIN pg_namespace n ON p.pronamespace=n.oid "
				 "WHERE proname='pg_stat_statements'");
	}

	/* make the call */
	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		pg_log_warning("query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		pg_log_error("query was: %s", sql);
		exit(EXIT_FAILURE);
	}

	if (PQntuples(res) > 0)
	{
		/* extension found: remember its schema */
		opts->namespace = pg_strdup(PQgetvalue(res, 0, 0));

		/* print the schema if verbose */
		if (opts->verbose)
			printf("pg_stat_statements namespace: %s\n", opts->namespace);
	}

	/* cleanup */
	PQclear(res);
}

/*
 * Compare given major and minor numbers to the one of the connected server
 */
/*
 * Return true when the connected server is at least version major.minor
 * (as previously detected by fetch_version()).
 */
bool backend_minimum_version(int major, int minor)
{
	return opts->major > major || (opts->major == major && opts->minor >= minor);
}

/*
 * Print the right header according to the stats mode
 */
/*
 * Print the one- or two-line column header matching the current stats mode
 * (opts->stat), taking the server version and the selected sub-statistics
 * (opts->substat) into account.  Also resets the header repeat counter.
 */
void print_header(void)
{
	char header1[PGSTAT_DEFAULT_STRING_SIZE] = "";
	char header2[PGSTAT_DEFAULT_STRING_SIZE] = "";

	switch(opts->stat)
	{
		case NONE:
			/* That shouldn't happen */
			break;
		case ARCHIVER:
			(void)printf("---- WAL counts ----\n");
			(void)printf(" archived failed \n");
			break;
		case BGWRITER:
			(void)printf("-------------- buffers -------------\n");
			(void)printf(" clean alloc maxwritten\n");
			break;
		case CHECKPOINTER:
			/* restartpoint columns only exist from PostgreSQL 17 on */
			if (backend_minimum_version(17, 0))
			{
				(void)printf("----- checkpoints ----- --------- restartpoints --------- ----- time ----- - buffers -\n");
				(void)printf(" timed requested timed requested done write sync written\n");
			}
			else if (backend_minimum_version(9, 2))
			{
				(void)printf("----- checkpoints ----- ----- time ----- - buffers -\n");
				(void)printf(" timed requested write sync written\n");
			}
			else
			{
				(void)printf("----- checkpoints ----- - buffers -\n");
				(void)printf(" timed requested written\n");
			}
			break;
		case CONNECTION:
			(void)printf(" - total - active - lockwaiting - idle in transaction - idle -\n");
			break;
		case DATABASE:
			/* the header is built incrementally, one group per selected
			 * sub-statistic (all groups when no substat filter is given) */
			if (opts->substat == NULL || strstr(opts->substat, "backends") != NULL)
			{
				strcat(header1, "- backends -");
				strcat(header2, " ");
			}
			if (opts->substat == NULL || strstr(opts->substat, "xacts") != NULL)
			{
				strcat(header1, " ------ xacts ------");
				strcat(header2, " commit rollback ");
			}
			if (opts->substat == NULL || strstr(opts->substat, "blocks") != NULL)
			{
				if (backend_minimum_version(9, 0))
				{
					strcat(header1, " ----------------------- blocks ----------------------");
					strcat(header2, " read hit hitratio read_time write_time ");
				}
				else
				{
					strcat(header1, " --------- blocks ---------");
					strcat(header2, " read hit hit ratio");
				}
			}
			if ((opts->substat == NULL || strstr(opts->substat, "tuples") != NULL) && backend_minimum_version(8, 3))
			{
				strcat(header1, " -------------- tuples --------------");
				strcat(header2, " ret fet ins upd del ");
			}
			if ((opts->substat == NULL || strstr(opts->substat, "temp") != NULL) && backend_minimum_version(9, 2))
			{
				strcat(header1, " ----- temp -----");
				strcat(header2, " files bytes ");
			}
			if ((opts->substat == NULL || strstr(opts->substat, "session") != NULL) && backend_minimum_version(14, 0))
			{
				strcat(header1, " ------------------------------- session -------------------------------");
				strcat(header2, " all_time active_time iit_time numbers abandoned fatal killed ");
			}
			if ((opts->substat == NULL || strstr(opts->substat, "misc") != NULL) && backend_minimum_version(8, 4))
			{
				if (backend_minimum_version(12, 0))
				{
					strcat(header1, " ------------ misc -------------");
					strcat(header2, " conflicts deadlocks checksums");
				}
				else if (backend_minimum_version(9, 2))
				{
					strcat(header1, " ------- misc --------");
					strcat(header2, " conflicts deadlocks");
				}
				else
				{
					strcat(header1, " --- misc ---");
					strcat(header2, " conflicts");
				}
			}
			(void)printf("%s\n%s\n", header1, header2);
			break;
		case TABLE:
			if (backend_minimum_version(16, 0))
			{
				(void)printf("-- sequential -- ----- index ---- ------------------------------- tuples ------------------------------- -------------- maintenance --------------\n");
				(void)printf(" scan tuples scan tuples ins upd del hotupd newpageupd live dead analyze ins_vac vacuum autovacuum analyze autoanalyze\n");
			}
			else if (backend_minimum_version(13, 0))
			{
				(void)printf("-- sequential -- ----- index ----- ------------------------- tuples ------------------------- -------------- maintenance --------------\n");
				(void)printf(" scan tuples scan tuples ins upd del hotupd live dead analyze ins_vac vacuum autovacuum analyze autoanalyze\n");
			}
			else if (backend_minimum_version(9, 4))
			{
				(void)printf("-- sequential -- ----- index ---- ------------------------- tuples ------------------ -------------- maintenance ------------\n");
				(void)printf(" scan tuples scan tuples ins upd del hotupd live dead analyze vacuum autovacuum analyze autoanalyze\n");
			}
			else if (backend_minimum_version(9, 1))
			{
				(void)printf("-- sequential -- ----- index ---- ------------------------- tuples ---------- -------------- maintenance ------------\n");
				(void)printf(" scan tuples scan tuples ins upd del hotupd live dead vacuum autovacuum analyze autoanalyze\n");
			}
			else if (backend_minimum_version(8, 3))
			{
				(void)printf("-- sequential -- ----- index ---- ------------------------- tuples ----------\n");
				(void)printf(" scan tuples scan tuples ins upd del hotupd live dead\n");
			}
			else
			{
				(void)printf("-- sequential -- ----- index ---- ------- tuples -------\n");
				(void)printf(" scan tuples scan tuples ins upd del\n");
			}
			break;
		case TABLEIO:
			(void)printf("---- heap table ---- ---- toast table --- --- heap indexes --- --- toast indexes --\n");
			(void)printf(" read hit read hit read hit read hit \n");
			break;
		case INDEX:
			(void)printf("-- scan -- ------ tuples -----\n");
			(void)printf(" read fetch\n");
			break;
		case FUNCTION:
			(void)printf("-- count -- --------- time ---------\n");
			(void)printf(" total self\n");
			break;
		case STATEMENT:
			/* like DATABASE, the header is built per sub-statistic group */
			if ((opts->substat == NULL || strstr(opts->substat, "plan") != NULL) && backend_minimum_version(13, 0))
			{
				strcat(header1, "------ plan ------");
				strcat(header2, " plans time ");
			}
			if (opts->substat == NULL || strstr(opts->substat, "exec") != NULL)
			{
				strcat(header1, " --------- exec ----------");
				strcat(header2, " calls time rows ");
			}
			if (opts->substat == NULL || strstr(opts->substat, "shared") != NULL)
			{
				strcat(header1, " ----------- shared -----------");
				strcat(header2, " hit read dirty written ");
			}
			if (opts->substat == NULL || strstr(opts->substat, "local") != NULL)
			{
				strcat(header1, " ----------- local -----------");
				strcat(header2, " hit read dirty written ");
			}
			if (opts->substat == NULL || strstr(opts->substat, "temp") != NULL)
			{
				strcat(header1, " ----- temp -----");
				strcat(header2, " read written ");
			}
			if (opts->substat == NULL || strstr(opts->substat, "time") != NULL)
			{
				if (backend_minimum_version(17, 0))
				{
					strcat(header1, " ------------------------------- time ------------------------------");
					strcat(header2, " shr read shr written loc read loc written tmp read tmp written ");
				}
				else if (backend_minimum_version(16, 0))
				{
					strcat(header1, " ------------------- time --------------------");
					strcat(header2, " read written tmp read tmp written ");
				}
				else if (backend_minimum_version(13, 0))
				{
					strcat(header1, " -------- time --------");
					strcat(header2, " read written ");
				}
			}
			if ((opts->substat == NULL || strstr(opts->substat, "wal") != NULL) && backend_minimum_version(13, 0))
			{
				strcat(header1, " ---------- wal ----------");
				strcat(header2, " records fpi bytes");
			}
			(void)printf("%s\n%s\n", header1, header2);
			break;
		case SLRU:
			(void)printf(" zeroed hit read written exists flushes truncates\n");
			break;
		case WAL:
			(void)printf(" records FPI bytes buffers_full write sync write_time sync_time\n");
			break;
		case BUFFERCACHE:
			(void)printf("------ used ------ ------ dirty -----\n");
			(void)printf(" total percent total percent\n");
			break;
		case DEADLIVE:
			(void)printf(" live dead percent\n");
			break;
		case XLOG:
		case REPSLOTS:
			/* XLOG and REPSLOTS share the same output layout */
			(void)printf("-------- filename -------- -- location -- ---- bytes ----\n");
			break;
		case TEMPFILE:
			(void)printf("--- size --- --- count ---\n");
			break;
		case WAITEVENT:
			(void)printf("---- LWLock ------- Lock --- BufferPin --- Activity --- Client --- Extension ------- IPC --- Timeout ------- IO --- Running ------ All ---\n");
			break;
		case PROGRESS_ANALYZE:
			(void)printf("--------------------- object --------------------- ---------- phase ---------- ---------------- stats --------------- -- time elapsed --\n");
			(void)printf(" database relation size %%sample blocks %%ext stats %%child tables\n");
			break;
		case PROGRESS_BASEBACKUP:
			(void)printf("--- pid --- ---------- phase ---------- ---------------------- stats -------------------- -- time elapsed --\n");
			(void)printf(" Sent size - Total size - %%Sent - %%Tablespaces\n");
			break;
		case PROGRESS_CLUSTER:
			(void)printf("--------------------------- object -------------------------- -------------------- phase -------------------- ------------------- stats ------------------- -- time elapsed --\n");
			(void)printf(" database table index tuples scanned tuples written %%blocks index rebuilt\n");
			break;
		case PROGRESS_COPY:
			(void)printf("----------------- object ---------------- -------------------- phase -------------------- --------- bytes --------- ------- tuples -------- -- time elapsed --\n");
			(void)printf(" database table command type processed total processed excluded\n");
			break;
		case PROGRESS_CREATEINDEX:
			(void)printf("--------------------------- object -------------------------- -------------------- phase -------------------- ------------------- stats ------------------- -- time elapsed --\n");
			(void)printf(" database table index %%lockers %%blocks %%tuples %%partitions\n");
			break;
		case PROGRESS_VACUUM:
			(void)printf("--------------------- object --------------------- ---------- phase ---------- ---------------- stats --------------- -- time elapsed --\n");
			(void)printf(" database relation size %%scan %%vacuum #index %%dead tuple\n");
			break;
		case PBPOOLS:
			(void)printf("---- client ----- ---------------- server ---------------- -- misc --\n");
			(void)printf(" active waiting active idle used tested login maxwait\n");
			break;
		case PBSTATS:
			(void)printf("---------------- total -----------------\n");
			(void)printf(" request received sent query time\n");
			break;
	}

	/* adapt to a resized terminal before scheduling the next header */
	if (wresized != 0)
		doresize();
	if (opts->dontredisplayheader)
		hdrcnt = 0;
	else
		hdrcnt = winlines;
}

/*
 * Call the right function according to the stats mode
 */
/*
 * Dispatch one sampling iteration to the print function matching the
 * current stats mode (opts->stat).
 */
void print_line(void)
{
	switch(opts->stat)
	{
		case NONE:
			/* That shouldn't happen */
			break;
		case ARCHIVER:
			print_pgstatarchiver();
			break;
		case BGWRITER:
			print_pgstatbgwriter();
			break;
		case CHECKPOINTER:
			print_pgstatcheckpointer();
			break;
		case CONNECTION:
			print_pgstatconnection();
			break;
		case DATABASE:
			print_pgstatdatabase();
			break;
		case TABLE:
			print_pgstattable();
			break;
		case TABLEIO:
			print_pgstattableio();
			break;
		case INDEX:
			print_pgstatindex();
			break;
		case FUNCTION:
			print_pgstatfunction();
			break;
		case STATEMENT:
			print_pgstatstatement();
			break;
		case SLRU:
			print_pgstatslru();
			break;
		case WAL:
			print_pgstatwal();
			break;
		case BUFFERCACHE:
			print_buffercache();
			break;
		case XLOG:
			print_xlogstats();
			break;
		case DEADLIVE:
			print_deadlivestats();
			break;
		case REPSLOTS:
			print_repslotsstats();
			break;
		case PROGRESS_ANALYZE:
			print_pgstatprogressanalyze();
			break;
		case PROGRESS_BASEBACKUP:
			print_pgstatprogressbasebackup();
			break;
		case PROGRESS_CLUSTER:
			print_pgstatprogresscluster();
			break;
		case PROGRESS_COPY:
			print_pgstatprogresscopy();
			break;
		case PROGRESS_CREATEINDEX:
			print_pgstatprogresscreateindex();
			break;
		case PROGRESS_VACUUM:
			print_pgstatprogressvacuum();
			break;
		case TEMPFILE:
			print_tempfilestats();
			break;
		case WAITEVENT:
			print_pgstatwaitevent();
			break;
		case PBPOOLS:
			print_pgbouncerpools();
			break;
		case PBSTATS:
			print_pgbouncerstats();
			break;
	}
}

/*
 * Allocate and initialize the right statistics struct according to the stats mode
 */
/*
 * Allocate the "previous sample" struct for the current stats mode and zero
 * every counter, so the first printed line shows the current cumulative
 * values rather than a diff.
 */
void allocate_struct(void)
{
	switch (opts->stat)
	{
		case NONE:
			/* That shouldn't happen */
			break;
		case ARCHIVER:
			previous_pgstatarchiver = (struct pgstatarchiver *) pg_malloc(sizeof(struct pgstatarchiver));
			previous_pgstatarchiver->archived_count = 0;
			previous_pgstatarchiver->failed_count = 0;
			previous_pgstatarchiver->stats_reset = PGSTAT_OLDEST_STAT_RESET;
			break;
		case BGWRITER:
			previous_pgstatbgwriter = (struct pgstatbgwriter *) pg_malloc(sizeof(struct pgstatbgwriter));
			previous_pgstatbgwriter->buffers_clean = 0;
			previous_pgstatbgwriter->maxwritten_clean = 0;
			previous_pgstatbgwriter->buffers_alloc = 0;
			previous_pgstatbgwriter->stats_reset = PGSTAT_OLDEST_STAT_RESET;
			break;
		case CHECKPOINTER:
			previous_pgstatcheckpointer = (struct pgstatcheckpointer *) pg_malloc(sizeof(struct pgstatcheckpointer));
			previous_pgstatcheckpointer->checkpoints_timed = 0;
			previous_pgstatcheckpointer->checkpoints_requested = 0;
			previous_pgstatcheckpointer->restartpoints_timed = 0;
			previous_pgstatcheckpointer->restartpoints_requested = 0;
			/* NOTE(review): restartpoints_timed is assigned twice; the
			 * second assignment below is redundant — a restartpoints_done
			 * initialization may have been intended. */
			previous_pgstatcheckpointer->restartpoints_timed = 0;
			previous_pgstatcheckpointer->write_time = 0;
			previous_pgstatcheckpointer->sync_time = 0;
			previous_pgstatcheckpointer->buffers_written = 0;
			previous_pgstatcheckpointer->stats_reset = PGSTAT_OLDEST_STAT_RESET;
			break;
		case CONNECTION:
			// nothing to do
			break;
		case DATABASE:
			previous_pgstatdatabase = (struct pgstatdatabase *) pg_malloc(sizeof(struct pgstatdatabase));
			previous_pgstatdatabase->xact_commit = 0;
			previous_pgstatdatabase->xact_rollback = 0;
			previous_pgstatdatabase->blks_read = 0;
			previous_pgstatdatabase->blks_hit = 0;
			previous_pgstatdatabase->tup_returned = 0;
previous_pgstatdatabase->tup_fetched = 0; previous_pgstatdatabase->tup_inserted = 0; previous_pgstatdatabase->tup_updated = 0; previous_pgstatdatabase->tup_deleted = 0; previous_pgstatdatabase->conflicts = 0; previous_pgstatdatabase->temp_files = 0; previous_pgstatdatabase->temp_bytes = 0; previous_pgstatdatabase->deadlocks = 0; previous_pgstatdatabase->checksum_failures = 0; previous_pgstatdatabase->blk_read_time = 0; previous_pgstatdatabase->blk_write_time = 0; previous_pgstatdatabase->session_time = 0; previous_pgstatdatabase->active_time = 0; previous_pgstatdatabase->idle_in_transaction_time = 0; previous_pgstatdatabase->sessions = 0; previous_pgstatdatabase->sessions_abandoned = 0; previous_pgstatdatabase->sessions_fatal = 0; previous_pgstatdatabase->sessions_killed = 0; previous_pgstatdatabase->stats_reset = PGSTAT_OLDEST_STAT_RESET; break; case TABLE: previous_pgstattable = (struct pgstattable *) pg_malloc(sizeof(struct pgstattable)); previous_pgstattable->seq_scan = 0; previous_pgstattable->seq_tup_read = 0; previous_pgstattable->idx_scan = 0; previous_pgstattable->idx_tup_fetch = 0; previous_pgstattable->n_tup_ins = 0; previous_pgstattable->n_tup_upd = 0; previous_pgstattable->n_tup_del = 0; previous_pgstattable->n_tup_hot_upd = 0; previous_pgstattable->n_tup_newpage_upd = 0; previous_pgstattable->n_live_tup = 0; previous_pgstattable->n_dead_tup = 0; previous_pgstattable->n_mod_since_analyze = 0; previous_pgstattable->n_ins_since_vacuum = 0; previous_pgstattable->vacuum_count = 0; previous_pgstattable->autovacuum_count = 0; previous_pgstattable->analyze_count = 0; previous_pgstattable->autoanalyze_count = 0; break; case TABLEIO: previous_pgstattableio = (struct pgstattableio *) pg_malloc(sizeof(struct pgstattableio)); previous_pgstattableio->heap_blks_read = 0; previous_pgstattableio->heap_blks_hit = 0; previous_pgstattableio->idx_blks_read = 0; previous_pgstattableio->idx_blks_hit = 0; previous_pgstattableio->toast_blks_read = 0; 
previous_pgstattableio->toast_blks_hit = 0; previous_pgstattableio->tidx_blks_read = 0; previous_pgstattableio->tidx_blks_hit = 0; break; case INDEX: previous_pgstatindex = (struct pgstatindex *) pg_malloc(sizeof(struct pgstatindex)); previous_pgstatindex->idx_scan = 0; previous_pgstatindex->idx_tup_read = 0; previous_pgstatindex->idx_tup_fetch = 0; break; case FUNCTION: previous_pgstatfunction = (struct pgstatfunction *) pg_malloc(sizeof(struct pgstatfunction)); previous_pgstatfunction->calls = 0; previous_pgstatfunction->total_time = 0; previous_pgstatfunction->self_time = 0; break; case STATEMENT: previous_pgstatstatement = (struct pgstatstatement *) pg_malloc(sizeof(struct pgstatstatement)); previous_pgstatstatement->plans = 0; previous_pgstatstatement->total_plan_time = 0; previous_pgstatstatement->calls = 0; previous_pgstatstatement->total_exec_time = 0; previous_pgstatstatement->rows = 0; previous_pgstatstatement->shared_blks_hit = 0; previous_pgstatstatement->shared_blks_read = 0; previous_pgstatstatement->shared_blks_dirtied = 0; previous_pgstatstatement->shared_blks_written = 0; previous_pgstatstatement->local_blks_hit = 0; previous_pgstatstatement->local_blks_read = 0; previous_pgstatstatement->local_blks_dirtied = 0; previous_pgstatstatement->local_blks_written = 0; previous_pgstatstatement->temp_blks_read = 0; previous_pgstatstatement->temp_blks_written = 0; previous_pgstatstatement->shared_blk_read_time = 0; previous_pgstatstatement->shared_blk_write_time = 0; previous_pgstatstatement->wal_records = 0; previous_pgstatstatement->wal_fpi = 0; previous_pgstatstatement->wal_bytes = 0; break; case SLRU: previous_pgstatslru = (struct pgstatslru *) pg_malloc(sizeof(struct pgstatslru)); previous_pgstatslru->blks_zeroed = 0; previous_pgstatslru->blks_hit = 0; previous_pgstatslru->blks_read = 0; previous_pgstatslru->blks_written = 0; previous_pgstatslru->blks_exists = 0; previous_pgstatslru->flushes = 0; previous_pgstatslru->truncates = 0; 
previous_pgstatslru->stats_reset = PGSTAT_OLDEST_STAT_RESET; break; case WAL: previous_pgstatwal = (struct pgstatwal *) pg_malloc(sizeof(struct pgstatwal)); previous_pgstatwal->wal_records = 0; previous_pgstatwal->wal_fpi = 0; previous_pgstatwal->wal_bytes = 0; previous_pgstatwal->wal_buffers_full = 0; previous_pgstatwal->wal_write = 0; previous_pgstatwal->wal_sync = 0; previous_pgstatwal->wal_write_time = 0; previous_pgstatwal->wal_sync_time = 0; previous_pgstatwal->stats_reset = PGSTAT_OLDEST_STAT_RESET; break; case XLOG: previous_xlogstats = (struct xlogstats *) pg_malloc(sizeof(struct xlogstats)); previous_xlogstats->location = pg_strdup("0/0"); previous_xlogstats->locationdiff = 0; break; case DEADLIVE: previous_deadlivestats = (struct deadlivestats *) pg_malloc(sizeof(struct deadlivestats)); previous_deadlivestats->live = 0; previous_deadlivestats->dead = 0; break; case REPSLOTS: previous_repslots = (struct repslots *) pg_malloc(sizeof(struct repslots)); previous_repslots->restartlsn = pg_strdup("0/0"); previous_repslots->restartlsndiff = 0; break; case BUFFERCACHE: case TEMPFILE: case WAITEVENT: case PROGRESS_ANALYZE: case PROGRESS_BASEBACKUP: case PROGRESS_CLUSTER: case PROGRESS_COPY: case PROGRESS_CREATEINDEX: case PROGRESS_VACUUM: case PBPOOLS: // no initialization worth doing... break; case PBSTATS: previous_pgbouncerstats = (struct pgbouncerstats *) pg_malloc(sizeof(struct pgbouncerstats)); previous_pgbouncerstats->total_request = 0; previous_pgbouncerstats->total_received = 0; previous_pgbouncerstats->total_sent = 0; previous_pgbouncerstats->total_query_time = 0; break; } } /* * Force a header to be prepended to the next output. */ static void needhdr(int dummy) { hdrcnt = 1; } /* * When the terminal is resized, force an update of the maximum number of rows * printed between each header repetition. Then force a new header to be * prepended to the next output. 
*/ void needresize(int signo) { wresized = 1; } /* * Update the global `winlines' count of terminal rows. */ void doresize(void) { int status; struct winsize w; for (;;) { status = ioctl(fileno(stdout), TIOCGWINSZ, &w); if (status == -1 && errno == EINTR) continue; else if (status == -1) pg_log_error("ioctl"); if (w.ws_row > 3) winlines = w.ws_row - 3; else winlines = PGSTAT_DEFAULT_LINES; break; } /* * Inhibit doresize() calls until we are rescheduled by SIGWINCH. */ wresized = 0; hdrcnt = 1; } /* * Close the PostgreSQL connection, and quit */ static void quit_properly(SIGNAL_ARGS) { PQfinish(conn); exit(EXIT_FAILURE); } /* * Main function */ int main(int argc, char **argv) { const char *progname; ConnParams cparams; /* * If the user stops the program (control-Z) and then resumes it, * print out the header again. */ pqsignal(SIGCONT, needhdr); pqsignal(SIGINT, quit_properly); /* * If our standard output is a tty, then install a SIGWINCH handler * and set wresized so that our first iteration through the main * pgstat loop will peek at the terminal's current rows to find out * how many lines can fit in a screenful of output. 
*/ if (isatty(fileno(stdout)) != 0) { wresized = 1; (void)signal(SIGWINCH, needresize); } else { wresized = 0; winlines = PGSTAT_DEFAULT_LINES; } /* Initialize the logging interface */ pg_logging_init(argv[0]); /* Get the program name */ progname = get_progname(argv[0]); /* Allocate the options struct */ opts = (struct options *) pg_malloc(sizeof(struct options)); /* Parse the options */ get_opts(argc, argv); /* Set the connection struct */ cparams.pghost = opts->hostname; cparams.pgport = opts->port; cparams.pguser = opts->username; cparams.dbname = opts->dbname; cparams.prompt_password = TRI_DEFAULT; cparams.override_dbname = NULL; /* Connect to the database */ conn = connectDatabase(&cparams, progname, false, false, false); /* Get PostgreSQL version * (if we are not connected to the pseudo pgBouncer database) */ if (opts->stat != PBPOOLS && opts->stat != PBSTATS) { fetch_version(); } /* Without the -s option, defaults to the bgwriter statistics */ if (opts->stat == NONE) { opts->stat = BGWRITER; } /* Check if the release number matches the statistics */ if ((opts->stat == CONNECTION || opts->stat == XLOG) && !backend_minimum_version(9, 2)) { PQfinish(conn); pg_log_error("You need at least v9.2 for this statistic."); exit(EXIT_FAILURE); } if (opts->stat == ARCHIVER && !backend_minimum_version(9, 4)) { PQfinish(conn); pg_log_error("You need at least v9.4 for this statistic."); exit(EXIT_FAILURE); } if ((opts->stat == PROGRESS_VACUUM || opts->stat == WAITEVENT) && !backend_minimum_version(9, 6)) { PQfinish(conn); pg_log_error("You need at least v9.6 for this statistic."); exit(EXIT_FAILURE); } if ((opts->stat == PROGRESS_CREATEINDEX || opts->stat == PROGRESS_CLUSTER) && !backend_minimum_version(12, 0)) { PQfinish(conn); pg_log_error("You need at least v12 for this statistic."); exit(EXIT_FAILURE); } if ((opts->stat == PROGRESS_ANALYZE || opts->stat == PROGRESS_BASEBACKUP|| opts->stat == SLRU) && !backend_minimum_version(13, 0)) { PQfinish(conn); pg_log_error("You 
need at least v13 for this statistic."); exit(EXIT_FAILURE); } if ((opts->stat == WAL || opts->stat == PROGRESS_COPY) && !backend_minimum_version(14, 0)) { PQfinish(conn); pg_log_error("You need at least v14 for this statistic."); exit(EXIT_FAILURE); } /* Check if the configuration matches the statistics */ if (opts->stat == FUNCTION) { if (strcmp(fetch_setting("track_functions"), "none") == 0) { PQfinish(conn); pg_log_error("track_functions is set to \"none\"."); exit(EXIT_FAILURE); } } if (opts->stat == STATEMENT) { fetch_pgstatstatements_namespace(); if (!opts->namespace) { PQfinish(conn); pg_log_error("Cannot find the pg_stat_statements extension."); exit(EXIT_FAILURE); } } if (opts->stat == BUFFERCACHE) { fetch_pgbuffercache_namespace(); if (!opts->namespace) { PQfinish(conn); pg_log_error("Cannot find the pg_buffercache extension."); exit(EXIT_FAILURE); } } /* Filter required for replication slots */ if (opts->stat == REPSLOTS && !opts->filter) { PQfinish(conn); pg_log_error("You need to specify a replication slot with -f for this statistic."); exit(EXIT_FAILURE); } /* Allocate and initialize statistics struct */ allocate_struct(); /* Grab cluster stats info */ for (hdrcnt = 1;;) { if (!--hdrcnt) print_header(); print_line(); (void)fflush(stdout); if (--opts->count == 0) break; (void)usleep(opts->interval * 1000000); } PQfinish(conn); return 0; } pgstats-REL1_4_0/pgwaitevent.c000066400000000000000000000432061470250475400164240ustar00rootroot00000000000000/* * pgwaitevent, a PostgreSQL app to gather statistical informations * on wait events of PostgreSQL PID backend. * * This software is released under the PostgreSQL Licence. * * Guillaume Lelarge, guillaume@lelarge.info, 2019-2024. 
* * pgstats/pgwaitevent.c */ /* * PostgreSQL headers */ #include "postgres_fe.h" #include "common/logging.h" #include "fe_utils/connect_utils.h" #include "libpq/pqsignal.h" /* * Defines */ #define PGWAITEVENT_VERSION "1.4.0" #define PGWAITEVENT_DEFAULT_LINES 20 #define PGWAITEVENT_DEFAULT_STRING_SIZE 2048 /* * Structs */ /* these are the options structure for command line parameters */ struct options { /* misc */ bool verbose; /* connection parameters */ char *dbname; char *hostname; char *port; char *username; /* version number */ int major; int minor; /* pid */ int pid; /* include leader and workers PIDs */ bool includeleaderworkers; /* frequency */ float interval; /* query and trace timestamps */ char *query_start; char *trace_start; }; /* * Global variables */ PGconn *conn; struct options *opts; extern char *optarg; /* * Function prototypes */ static void help(const char *progname); void get_opts(int, char **); #ifndef FE_MEMUTILS_H void *pg_malloc(size_t size); char *pg_strdup(const char *in); #endif void fetch_version(void); bool backend_minimum_version(int major, int minor); void build_env(void); bool active_session(void); void handle_current_query(void); void drop_env(void); static void quit_properly(SIGNAL_ARGS); /* * Print help message */ static void help(const char *progname) { printf("%s gathers every wait events from a specific PID, grouping them by queries.\n\n" "Usage:\n" " %s [OPTIONS] PID\n" "\nGeneral options:\n" " -g include leader and workers (parallel queries) [v13+]\n" " -i interval (default is 1s)\n" " -v verbose\n" " -?|--help show this help, then exit\n" " -V|--version output version information, then exit\n" "\nConnection options:\n" " -h HOSTNAME database server host or socket directory\n" " -p PORT database server port number\n" " -U USER connect as specified database user\n" " -d DBNAME database to connect to\n\n" "Report bugs to .\n", progname, progname); } /* * Parse command line options and check for some usage errors */ void 
get_opts(int argc, char **argv) { int c; const char *progname; progname = get_progname(argv[0]); /* set the defaults */ opts->verbose = false; opts->dbname = NULL; opts->hostname = NULL; opts->port = NULL; opts->username = NULL; opts->pid = 0; opts->includeleaderworkers = false; opts->interval = 1; /* we should deal quickly with help and version */ if (argc > 1) { if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) { help(progname); exit(0); } if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) { puts("pgwaitevent " PGWAITEVENT_VERSION " (compiled with PostgreSQL " PG_VERSION ")"); exit(0); } } /* get options */ while ((c = getopt(argc, argv, "h:p:U:d:i:gv")) != -1) { switch (c) { /* specify the database */ case 'd': opts->dbname = pg_strdup(optarg); break; /* host to connect to */ case 'h': opts->hostname = pg_strdup(optarg); break; /* parallel queries */ case 'g': opts->includeleaderworkers = true; break; /* interval */ case 'i': opts->interval = atof(optarg); break; /* port to connect to on remote host */ case 'p': opts->port = pg_strdup(optarg); break; /* username */ case 'U': opts->username = pg_strdup(optarg); break; /* get verbose */ case 'v': opts->verbose = true; break; default: pg_log_error("Try \"%s --help\" for more information.\n", progname); exit(EXIT_FAILURE); } } /* get PID to monitor */ if (optind < argc) { opts->pid = atoi(argv[optind]); } else { pg_log_error("PID required.\n"); pg_log_info("Try \"%s --help\" for more information.\n", progname); exit(EXIT_FAILURE); } /* set dbname if unset */ if (opts->dbname == NULL) { /* * We want to use dbname for possible error reports later, * and in case someone has set and is using PGDATABASE * in its environment, preserve that name for later usage */ if (!getenv("PGDATABASE")) opts->dbname = "postgres"; else opts->dbname = getenv("PGDATABASE"); } } #ifndef FE_MEMUTILS_H /* * "Safe" wrapper around malloc(). 
*/ void * pg_malloc(size_t size) { void *tmp; /* Avoid unportable behavior of malloc(0) */ if (size == 0) size = 1; tmp = malloc(size); if (!tmp) { pg_log_error("out of memory (pg_malloc)\n"); exit(EXIT_FAILURE); } return tmp; } /* * "Safe" wrapper around strdup(). */ char * pg_strdup(const char *in) { char *tmp; if (!in) { pg_log_error("cannot duplicate null pointer (internal error)\n"); exit(EXIT_FAILURE); } tmp = strdup(in); if (!tmp) { pg_log_error("out of memory (pg_strdup)\n"); exit(EXIT_FAILURE); } return tmp; } #endif /* * Fetch PostgreSQL major and minor numbers */ void fetch_version() { char sql[PGWAITEVENT_DEFAULT_STRING_SIZE]; PGresult *res; /* get the cluster version */ snprintf(sql, sizeof(sql), "SELECT version()"); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_error("query failed: %s", PQerrorMessage(conn)); pg_log_info("query was: %s", sql); PQclear(res); PQfinish(conn); exit(EXIT_FAILURE); } /* get the only row, and parse it to get major and minor numbers */ sscanf(PQgetvalue(res, 0, 0), "%*s %d.%d", &(opts->major), &(opts->minor)); /* print version */ if (opts->verbose) printf("Detected release: %d.%d\n", opts->major, opts->minor); /* cleanup */ PQclear(res); } /* * Compare given major and minor numbers to the one of the connected server */ bool backend_minimum_version(int major, int minor) { return opts->major > major || (opts->major == major && opts->minor >= minor); } /* * Close the PostgreSQL connection, and quit */ static void quit_properly(SIGNAL_ARGS) { drop_env(); PQfinish(conn); exit(EXIT_FAILURE); } /* * Create function */ void build_env() { char sql[PGWAITEVENT_DEFAULT_STRING_SIZE]; PGresult *res; /* build the DDL query */ snprintf(sql, sizeof(sql), "CREATE TEMPORARY TABLE waitevents (we text, wet text, o integer);\n" "ALTER TABLE waitevents ADD UNIQUE(we, wet);\n"); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || 
PQresultStatus(res) > 2) { pg_log_error("query failed: %s", PQerrorMessage(conn)); pg_log_info("query was: %s", sql); PQclear(res); PQfinish(conn); exit(EXIT_FAILURE); } /* print verbose */ if (opts->verbose) printf("Temporary table created\n"); /* cleanup */ PQclear(res); /* build the DDL query */ snprintf(sql, sizeof(sql), "CREATE SCHEMA pgwaitevent;"); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_error("query failed: %s", PQerrorMessage(conn)); pg_log_info("query was: %s", sql); PQclear(res); PQfinish(conn); exit(EXIT_FAILURE); } /* print verbose */ if (opts->verbose) printf("Schema created\n"); /* build the DDL query */ snprintf(sql, sizeof(sql), "CREATE OR REPLACE FUNCTION pgwaitevent.trace_wait_events_for_pid(p integer, leader boolean, s numeric default 1)\n" "RETURNS TABLE (wait_event text, wait_event_type text, occurences integer, percent numeric(5,2))\n" "LANGUAGE plpgsql\n" "AS $$\n" "DECLARE\n" " q text;\n" " r record;\n" "BEGIN\n" " -- check it is a backend\n" " SELECT query INTO q FROM pg_stat_activity\n" " WHERE pid=p AND backend_type='client backend' AND state='active';\n" "\n" " IF NOT FOUND THEN\n" " RAISE EXCEPTION 'PID %% doesn''t appear to be an active backend', p\n" " USING HINT = 'Check the PID and its state';\n" " END IF;\n" "\n" " -- logging\n" " RAISE LOG 'Tracing PID %%, sampling at %%s', p, s;\n" " RAISE LOG 'Query is <%%>', q;\n" "\n" " -- drop if exists, then create temp table\n" " TRUNCATE waitevents;\n" "\n" " -- loop till the end of the query\n" " LOOP\n" " -- get wait event\n" " IF leader THEN\n" " SELECT COALESCE(psa.wait_event, '[Running]') AS wait_event,\n" " COALESCE(psa.wait_event_type, '') AS wait_event_type\n" " INTO r\n" " FROM pg_stat_activity psa\n" " WHERE pid=p OR leader_pid=p;\n" " ELSE\n" " SELECT COALESCE(psa.wait_event, '[Running]') AS wait_event,\n" " COALESCE(psa.wait_event_type, '') AS wait_event_type\n" " INTO r\n" " FROM 
pg_stat_activity psa\n" " WHERE pid=p;\n" " END IF;\n" "\n" " -- loop control\n" " EXIT WHEN r.wait_event = 'ClientRead';\n" "\n" " -- update wait events stats\n" " INSERT INTO waitevents VALUES (r.wait_event, r.wait_event_type, 1)\n" " ON CONFLICT (we,wet) DO UPDATE SET o = waitevents.o+1;\n" "\n" " -- sleep a bit\n" " PERFORM pg_sleep(s);\n" " END LOOP;\n" "\n" " -- return stats\n" " RETURN QUERY\n" " SELECT we, wet, o, (o*100./sum(o) over ())::numeric(5,2)\n" " FROM waitevents\n" " ORDER BY o DESC;\n" "END\n" "$$;\n"); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_error("query failed: %s", PQerrorMessage(conn)); pg_log_info("query was: %s", sql); PQclear(res); PQfinish(conn); exit(EXIT_FAILURE); } /* print verbose */ if (opts->verbose) printf("Function created\n"); /* cleanup */ PQclear(res); } /* * Is PID an active session? */ bool active_session() { char sql[PGWAITEVENT_DEFAULT_STRING_SIZE]; PGresult *res; bool active = false; /* build the query */ snprintf(sql, sizeof(sql), "SELECT state, query, query_start, now() FROM pg_stat_activity\n" "WHERE backend_type='client backend'\n" "AND pid=%d", opts->pid); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_error("query failed: %s", PQerrorMessage(conn)); pg_log_info("query was: %s", sql); PQclear(res); PQfinish(conn); exit(EXIT_FAILURE); } /* if zero row, then PID is gone */ if (PQntuples(res) == 0) { printf("\nNo more session with PID %d, exiting...\n", opts->pid); drop_env(); PQfinish(conn); exit(2); } /* if one row, we found the good one */ if (PQntuples(res) == 1) { if (!strncmp(PQgetvalue(res, 0, 0), "active", 6)) { active = true; printf("\nNew query: %s\n", PQgetvalue(res, 0, 1)); opts->query_start = pg_strdup(PQgetvalue(res, 0, 2)); opts->trace_start = pg_strdup(PQgetvalue(res, 0, 3)); } } /* this also means that in case of multiple rows, we treat it as no 
rows */ /* cleanup */ PQclear(res); return active; } /* * Handle query */ void handle_current_query() { char sql[PGWAITEVENT_DEFAULT_STRING_SIZE]; PGresult *workers_res; PGresult *trace_res; PGresult *duration_res; int nrows; int row; int nworkers = 0; if (opts->includeleaderworkers) { /* build the workers query if the user asked to include leader and workers */ snprintf(sql, sizeof(sql), "SELECT count(*) FROM pg_stat_activity " "WHERE pid=%d OR leader_pid=%d", opts->pid, opts->pid); /* execute it */ workers_res = PQexec(conn, sql); /* check and deal with errors */ if (!workers_res || PQresultStatus(workers_res) > 2) { pg_log_error("query failed: %s", PQerrorMessage(conn)); pg_log_info("query was: %s", sql); PQclear(workers_res); PQfinish(conn); exit(EXIT_FAILURE); } /* get the number of leader and workers */ nworkers = atoi(PQgetvalue(workers_res, 0, 0)); /* clean up */ PQclear(workers_res); } /* build the trace query */ snprintf(sql, sizeof(sql), "SELECT * FROM pgwaitevent.trace_wait_events_for_pid(%d, %s, %f);", opts->pid, opts->includeleaderworkers ? 
"'t'" : "'f'", opts->interval); /* execute it */ trace_res = PQexec(conn, sql); /* check and deal with errors */ if (!trace_res || PQresultStatus(trace_res) > 2) { pg_log_error("query failed: %s", PQerrorMessage(conn)); pg_log_info("query was: %s", sql); PQclear(trace_res); PQfinish(conn); exit(EXIT_FAILURE); } /* build the duration query */ snprintf(sql, sizeof(sql), "SELECT now()-'%s'::timestamptz, now()-'%s'::timestamptz;", opts->query_start, opts->trace_start); /* execute it */ duration_res = PQexec(conn, sql); /* check and deal with errors */ if (!duration_res || PQresultStatus(duration_res) > 2) { pg_log_error("query failed: %s", PQerrorMessage(conn)); pg_log_info("query was: %s", sql); PQclear(duration_res); PQfinish(conn); exit(EXIT_FAILURE); } /* show durations */ (void)printf("Query duration: %s\n", PQgetvalue(duration_res, 0, 0)); (void)printf("Trace duration: %s\n", PQgetvalue(duration_res, 0, 1)); /* show number of workers */ if (opts->includeleaderworkers) { (void)printf("Number of processes: %d\n", nworkers); } /* get the number of rows */ nrows = PQntuples(trace_res); /* print headers */ (void)printf( "┌───────────────────────────────────┬───────────┬────────────┬─────────┐\n" "│ Wait event │ WE type │ Occurences │ Percent │\n" "├───────────────────────────────────┼───────────┼────────────┼─────────┤\n"); /* for each row, print all columns in a row */ for (row = 0; row < nrows; row++) { (void)printf("│ %-33s │ %-9s │ %10ld │ %6.2f │\n", PQgetvalue(trace_res, row, 0), PQgetvalue(trace_res, row, 1), atol(PQgetvalue(trace_res, row, 2)), atof(PQgetvalue(trace_res, row, 3)) ); } /* print footers */ (void)printf( "└───────────────────────────────────┴───────────┴────────────┴─────────┘\n"); /* cleanup */ PQclear(duration_res); PQclear(trace_res); } /* * Drop env */ void drop_env() { char sql[PGWAITEVENT_DEFAULT_STRING_SIZE]; PGresult *res; /* no need to drop the temp table */ /* drop function */ snprintf(sql, sizeof(sql), "DROP FUNCTION 
pgwaitevent.trace_wait_events_for_pid(integer, boolean, numeric)"); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_error("query failed: %s", PQerrorMessage(conn)); pg_log_info("query was: %s", sql); PQclear(res); PQfinish(conn); exit(EXIT_FAILURE); } /* print verbose */ if (opts->verbose) printf("Function dropped\n"); /* drop function */ snprintf(sql, sizeof(sql), "DROP SCHEMA pgwaitevent"); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { pg_log_error("query failed: %s", PQerrorMessage(conn)); pg_log_info("query was: %s", sql); PQclear(res); PQfinish(conn); exit(EXIT_FAILURE); } /* print verbose */ if (opts->verbose) printf("Schema dropped\n"); /* cleanup */ PQclear(res); } /* * Main function */ int main(int argc, char **argv) { const char *progname; ConnParams cparams; /* * If the user stops the program, * quit nicely. */ pqsignal(SIGINT, quit_properly); /* Initialize the logging interface */ pg_logging_init(argv[0]); /* Get the program name */ progname = get_progname(argv[0]); /* Allocate the options struct */ opts = (struct options *) pg_malloc(sizeof(struct options)); /* Parse the options */ get_opts(argc, argv); /* Set the connection struct */ cparams.pghost = opts->hostname; cparams.pgport = opts->port; cparams.pguser = opts->username; cparams.dbname = opts->dbname; cparams.prompt_password = TRI_DEFAULT; cparams.override_dbname = NULL; /* Connect to the database */ conn = connectDatabase(&cparams, progname, false, false, false); /* Fetch version */ fetch_version(); /* Check options */ if (opts->includeleaderworkers && !backend_minimum_version(13, 0)) { pg_log_error("You need at least v13 to include workers' wait events."); exit(EXIT_FAILURE); } /* Create the trace_wait_events_for_pid function */ build_env(); /* show what we're doing */ printf("Tracing wait events for PID %d, sampling at %.3fs, %s\n", opts->pid, 
opts->interval, opts->includeleaderworkers ? "including leader and workers" : "PID only"); while(true) { if (active_session()) { /* Handle query currently executed */ handle_current_query(); } /* wait 100ms */ (void)usleep(100000); } /* Drop the function */ drop_env(); PQfinish(conn); return 0; }