pax_global_header00006660000000000000000000000064140631043040014505gustar00rootroot0000000000000052 comment=adc299c687ae1d2f202b2d66fb5101b10bff4b5d pgstats-REL1_2_0/000077500000000000000000000000001406310430400136775ustar00rootroot00000000000000pgstats-REL1_2_0/.gitignore000066400000000000000000000000711406310430400156650ustar00rootroot00000000000000/pgcsvstat /pgdisplay /pgreport /pgstat /pgwaitevent *.o pgstats-REL1_2_0/BUGS000066400000000000000000000000001406310430400143500ustar00rootroot00000000000000pgstats-REL1_2_0/License000066400000000000000000000020751406310430400152100ustar00rootroot00000000000000These softwares, pgcsvstat, pgdisplay, pgreport, and pgstat, are released under the terms of the PostgreSQL License. Copyright (c) 2011-2021, Guillaume Lelarge Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. IN NO EVENT SHALL Guillaume Lelarge BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF Guillaume Lelarge HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Guillaume Lelarge SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND Guillaume Lelarge HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
pgstats-REL1_2_0/Makefile000066400000000000000000000012131406310430400153340ustar00rootroot00000000000000PGFILEDESC = "Statistics utilities" PGAPPICON = win32 PROGRAMS = pgcsvstat pgstat pgdisplay pgwaitevent pgreport PG_CPPFLAGS = -I$(libpq_srcdir) PG_LIBS = $(libpq_pgport) SCRIPTS_built = pgcsvstat pgstat pgdisplay pgwaitevent pgreport EXTRA_CLEAN = rm -f $(addsuffix $(X), $(PROGRAMS)) $(addsuffix .o, $(PROGRAMS)) PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) all: $(PROGRAMS) pgreport.o: pgreport_queries.h %: %.o $(WIN32RES) $(CC) $(CFLAGS) $^ $(libpq_pgport) $(LDFLAGS) -lpgfeutils -lm -o $@$(X) pgcsvstat: pgcsvstat.o pgdisplay: pgdisplay.o pgstat: pgstat.o pgwaitevent: pgwaitevent.o pgreport: pgreport.o pgstats-REL1_2_0/README.md000066400000000000000000001025711406310430400151640ustar00rootroot00000000000000README ====== This repository contains the source of a collection of tools. pgstat is a vmstat-like tool for PostgreSQL. pgreport is a reporting tool for PostgreSQL. It tries to get a lot of informations from the metadata and statistics of PostgreSQL. pgwaitevent gathers every wait event for a specific PID, grouping them by queries. pgcsvstat outputs PostgreSQL statistics views into CSV files. The idea is that you can load them on any spreadsheet to get the graphs you want. pgdisplay tries to display a table in an informative way. Still pretty much experimental. They all should be compatible with the latest PostgreSQL release (13 right now), and down to the oldest stable release (9.5 right now). They may also be compatible with much older releases (8.x for most of them). Requirements ------------ You only need the libpq library, PostgreSQL header files, and the pg_config tool. The header files and the tool are usually available in a -dev package. Compilation ----------- You only have to do: ``` make make install ``` Usage ----- Use --help to get informations on all command line options for these three tools. 
More informations on pgstat --------------------------- pgstat is an online command tool that connects to a database and grabs its activity statistics. As PostgreSQL has many statistics, you have a command switch to choose the one you want (-s): * archiver for pg_stat_archiver (9.4+) * bgwriter for pg_stat_bgwriter * connection for connections by type (9.2+) * database for pg_stat_database * table for pg_stat_all_tables * tableio for pg_statio_all_tables * index for pg_stat_all_indexes * function for pg_stat_user_function * statement for pg_stat_statements * xlog for xlog writes (9.2+) * tempfile for temporary file usage * waitevent for wait events usage (9.6+) * progress_analyze to get the progress on an ANALYZE statement (13+) * progress_basebackup to get the progress on a BASE BACKUP (replication) statement (13+) * progress_cluster to get the progress on a CLUSTER/VACUUM FULL statement (12+) * progress_createindex to get the progress on a CREATE INDEX statement (12+) * progress_vacuum to get the progress on a VACUUM statement (9.6+) * pbpools for pgBouncer pools statistics * pbstats for pgBouncer general statistics It looks a lot like vmstat. You ask it the statistics you want, and the frequency to gather these statistics. Just like this: ``` $ pgstat -s connection - total - active - lockwaiting - idle in transaction - idle - 1546 15 0 0 1531 1544 17 0 0 1527 1544 14 0 0 1530 1546 26 0 0 1520 1543 21 0 0 1522 ``` Yeah, way too many idle connections. Actually, way too many connections. Definitely needs a pooler there. 
This is what happens on a 10-secondes 10-clients pgbench test: ``` $ pgstat -s database 1 - backends - ------ xacts ------ -------------- blocks -------------- -------------- tuples -------------- ------ temp ------ ------- misc -------- commit rollback read hit read_time write_time ret fet ins upd del files bytes conflicts deadlocks 1 224041 17 24768 2803774 0 0 4684398 234716 2105701 16615 113 1 14016512 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 3 0 0 205 0 0 92 92 0 0 0 0 0 0 0 11 20 0 0 500 0 0 1420 184 0 1 0 0 0 0 0 11 69 0 1 4438 0 0 1736 986 68 204 0 0 0 0 0 11 136 0 12 4406 0 0 1767 270 135 405 0 0 0 0 0 11 108 0 0 3434 0 0 1394 214 107 321 0 0 0 0 0 11 96 0 0 3290 0 0 1240 190 95 285 0 0 0 0 0 11 125 0 0 4045 0 0 1620 248 124 372 0 0 0 0 0 11 126 0 0 4222 0 0 1628 250 125 375 0 0 0 0 0 11 111 0 0 3644 0 0 1436 220 110 330 0 0 0 0 0 11 78 0 0 2549 0 0 1918 161 75 225 0 0 0 0 0 11 118 0 0 3933 0 0 1524 234 117 351 0 0 0 0 0 1 130 0 0 4276 0 0 1685 258 129 387 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ``` You clearly see when it starts, when it stops, and what it did during the 10 seconds. You can filter on a specific database with the -f command line option. 
Here is what happens at the tables level: ``` $ pgstat -s table -d b1 1 -- sequential -- ------ index ------ ----------------- tuples -------------------------- -------------- maintenance -------------- scan tuples scan tuples ins upd del hotupd live dead analyze vacuum autovacuum analyze autoanalyze 68553 1467082 264957 266656 7919869 59312 113 57262 4611779 3782 5401 22 10 4 22 3 430 0 0 0 0 0 0 0 0 0 0 0 0 0 3 430 0 0 0 0 0 0 0 0 0 0 0 0 0 231 2351 1116 1222 61 184 0 180 61 124 245 2 0 0 0 431 1750 240 240 120 360 0 358 120 242 480 0 0 0 0 385 1640 220 220 110 330 0 327 110 11 440 0 0 0 0 340 1475 190 190 95 285 0 285 95 189 380 0 0 0 0 398 1651 222 222 111 333 0 331 111 -2 444 0 0 0 0 353 1519 198 198 99 297 0 293 99 200 396 0 0 0 0 335 1453 186 186 93 279 0 274 93 -210 372 0 0 0 0 446 1838 256 256 128 384 0 381 128 104 512 0 0 0 0 425 1739 238 238 119 357 0 354 119 241 476 0 0 0 0 360 1552 204 204 102 306 0 305 102 -10 408 0 0 0 0 386 1629 218 218 109 327 0 325 109 57 436 0 0 0 0 437 1761 242 242 121 363 0 363 121 -292 484 0 0 0 0 373 1563 206 206 103 309 0 305 103 -1 412 0 0 0 0 323 1442 184 184 92 276 0 273 92 188 368 0 0 0 0 412 1706 232 232 116 348 0 346 116 76 464 0 0 0 0 291 1332 164 164 82 246 0 245 82 -216 328 0 0 0 0 189 1013 106 106 53 159 0 158 53 106 212 0 0 0 0 346 1508 196 196 98 294 0 290 98 -18 392 0 0 0 0 304 1376 172 172 86 258 0 258 86 -156 344 0 0 0 0 442 1794 248 248 124 372 0 368 124 -260 496 0 0 0 0 9 1371 157 260 0 13 0 13 -11602 -329 -6053 0 2 0 3 3 430 0 0 0 0 0 0 0 0 0 0 0 0 0 3 430 0 0 0 0 0 0 0 0 0 0 0 0 0 ``` You can also filter by table name with the -f command line switch: ``` $ pgstat -s table -d b1 -f pgbench_history 1 -- sequential -- ------ index ------ ----------------- tuples -------------------------- -------------- maintenance -------------- scan tuples scan tuples ins upd del hotupd live dead analyze vacuum autovacuum analyze autoanalyze 0 0 0 0 21750 0 0 0 2022 0 0 1 0 1 7 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 64 0 0 0 
64 0 64 0 0 0 0 0 0 0 0 122 0 0 0 122 0 122 0 0 0 0 0 0 0 0 106 0 0 0 106 0 106 0 0 0 0 0 0 0 0 99 0 0 0 99 0 99 0 0 0 0 0 0 0 0 88 0 0 0 88 0 88 0 0 0 0 0 0 0 0 116 0 0 0 116 0 116 0 0 0 0 0 0 0 0 99 0 0 0 99 0 99 0 0 0 0 0 0 0 0 61 0 0 0 61 0 61 0 0 0 0 0 0 0 0 42 0 0 0 42 0 42 0 0 0 0 0 0 0 0 106 0 0 0 106 0 106 0 0 0 0 0 0 0 0 55 0 0 0 55 0 55 0 0 0 0 0 0 0 0 121 0 0 0 121 0 121 0 0 0 0 0 0 0 0 68 0 0 0 -1942 0 -1011 0 0 0 1 0 0 0 0 99 0 0 0 99 0 99 0 0 0 0 0 0 0 0 109 0 0 0 109 0 109 0 0 0 0 0 0 0 0 94 0 0 0 94 0 94 0 0 0 0 0 0 0 0 120 0 0 0 120 0 120 0 0 0 0 0 0 0 0 110 0 0 0 110 0 110 0 0 0 0 0 0 0 0 100 0 0 0 100 0 100 0 0 0 0 0 0 0 0 115 0 0 0 115 0 115 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ``` We see that the activity on this table is quite different from what happens to the other tables. There's also a report from the pg_stat_statements extension. It works pretty well: ``` $ pgstat -s statement -d b1 --------- misc ---------- ----------- shared ----------- ----------- local ----------- ----- temp ----- -------- time -------- calls time rows hit read dirty written hit read dirty written read written read written 383843 1756456.50 13236523 9277049 38794 50915 1640 1008844 17703 8850 8850 1711 1711 0.00 0.00 1 0.75 1 0 0 0 0 0 0 0 0 0 0 0.00 0.00 1 0.50 1 0 0 0 0 0 0 0 0 0 0 0.00 0.00 1 0.75 1 0 0 0 0 0 0 0 0 0 0 0.00 0.00 310 2709.88 220 1527 10 63 0 0 0 0 0 0 0 0.00 0.00 797 8555.00 569 3736 10 109 0 0 0 0 0 0 0 0.00 0.00 725 9215.25 519 3610 23 115 0 0 0 0 0 0 0 0.00 0.00 266 7729.38 190 1257 2 43 0 0 0 0 0 0 0 0.00 0.00 831 10196.12 594 3988 11 112 0 0 0 0 0 0 0 0.00 0.00 788 8678.38 563 3803 8 92 0 0 0 0 0 0 0 0.00 0.00 736 9080.62 526 3616 7 89 0 0 0 0 0 0 0 0.00 0.00 792 8395.50 566 3742 11 96 0 0 0 0 0 0 0 0.00 0.00 814 9346.75 582 3985 9 84 0 0 0 0 0 0 0 0.00 0.00 763 8941.12 545 3799 9 84 0 0 0 0 0 0 0 0.00 0.00 728 8543.25 520 3549 8 62 0 0 0 0 0 0 0 0.00 0.00 589 9143.62 421 2812 7 45 0 0 0 0 0 0 0 0.00 0.00 785 
8710.00 561 3788 4 60 0 0 0 0 0 0 0 0.00 0.00 785 9117.25 561 3885 4 60 0 0 0 0 0 0 0 0.00 0.00 785 8397.12 561 3788 1 52 0 0 0 0 0 0 0 0.00 0.00 799 9398.12 571 3925 7 60 0 0 0 0 0 0 0 0.00 0.00 765 9033.88 547 3757 3 43 0 0 0 0 0 0 0 0.00 0.00 805 8663.25 575 3886 6 57 0 0 0 0 0 0 0 0.00 0.00 765 8490.50 547 3661 7 39 0 0 0 0 0 0 0 0.00 0.00 764 8850.00 546 3698 4 41 0 0 0 0 0 0 0 0.00 0.00 396 6706.50 283 1992 1 14 0 0 0 0 0 0 0 0.00 0.00 1 0.38 1 0 0 0 0 0 0 0 0 0 0 0.00 0.00 1 0.62 1 0 0 0 0 0 0 0 0 0 0 0.00 0.00 ``` You can filter a specific statement by its query id. Of course, it first searches for the extension, and complains if it isn't there: ``` $ pgstat -s statement -d b2 pgstat: Cannot find the pg_stat_statements extension. ``` One of my customers had a lot of writes on their databases, and I wanted to know how much writes occured in the WAL files. vmstat would only tell me how much writes on all files, but I was only interested in WAL writes. So I added a new report that grabs the current XLOG position, and diff it with the previous XLOG position. 
It gives something like this with a pgbench test: ``` $ ./pgstat -s xlog -------- filename -------- -- location -- ---- bytes ---- 00000001000000000000003E 0/3EC49940 1053071680 00000001000000000000003E 0/3EC49940 0 00000001000000000000003E 0/3EC49940 0 00000001000000000000003E 0/3EC875F8 253112 00000001000000000000003E 0/3ED585C8 856016 00000001000000000000003E 0/3EE36C40 910968 00000001000000000000003E 0/3EEFCC58 811032 00000001000000000000003E 0/3EFAB9D0 716152 00000001000000000000003F 0/3F06A3C0 780784 00000001000000000000003F 0/3F0E79E0 513568 00000001000000000000003F 0/3F1354E0 318208 00000001000000000000003F 0/3F1F6218 789816 00000001000000000000003F 0/3F2BCE00 814056 00000001000000000000003F 0/3F323240 418880 00000001000000000000003F 0/3F323240 0 00000001000000000000003F 0/3F323240 0 ``` That's not big numbers, so it's easy to find it writes at 253K/s, but if the number were bigger, it might get hard to read. One of my co-worker, Julien Rouhaud, added a human readable option: ``` $ ./pgstat -s xlog -H -------- filename -------- -- location -- ---- bytes ---- 00000001000000000000003F 0/3F32EDC0 1011 MB 00000001000000000000003F 0/3F32EDC0 0 bytes 00000001000000000000003F 0/3F32EDC0 0 bytes 00000001000000000000003F 0/3F3ABC78 500 kB 00000001000000000000003F 0/3F491C10 920 kB 00000001000000000000003F 0/3F568548 858 kB 00000001000000000000003F 0/3F634748 817 kB 00000001000000000000003F 0/3F6F4378 767 kB 00000001000000000000003F 0/3F7A56D8 709 kB 00000001000000000000003F 0/3F8413D0 623 kB 00000001000000000000003F 0/3F8D7590 600 kB 00000001000000000000003F 0/3F970160 611 kB 00000001000000000000003F 0/3F9F2840 522 kB 00000001000000000000003F 0/3FA1FD88 181 kB 00000001000000000000003F 0/3FA1FD88 0 bytes 00000001000000000000003F 0/3FA1FD88 0 bytes 00000001000000000000003F 0/3FA1FD88 0 bytes ``` That's indeed much more readable if you ask me. Another customer wanted to know how many temporary files were written, and their sizes. 
Of course, you can get that with the pg_stat_database view, but it only gets added when the query is done. We wanted to know when the query is executed. So I added another report: ``` $ ./pgstat -s tempfile --- size --- --- count --- 0 0 0 0 13082624 1 34979840 1 56016896 1 56016896 1 56016896 1 0 0 0 0 ``` You see the file being stored. Since release 9.6, there are some very interesting progress views. Here is an example that shows the VACUUM progress on a table. We can see the progress while it goes through different phases. ``` $ ./pgstat -s progressvacuum --------------------- object --------------------- ---------- phase ---------- ---------------- stats --------------- database relation size %scan %vacuum #index %dead tuple bdd_alfresco alf_node_properties 254 GB scanning heap 39.95 0.00 0 0.21 bdd_alfresco alf_node_properties 254 GB scanning heap 39.98 0.00 0 0.21 bdd_alfresco alf_node_properties 254 GB scanning heap 40.01 0.00 0 0.21 bdd_alfresco alf_prop_unique_ctx 1792 kB vacuuming indexes 100.00 0.00 0 0.00 bdd_alfresco alf_node_properties 254 GB scanning heap 40.02 0.00 0 0.21 bdd_alfresco alf_prop_unique_ctx 1792 kB vacuuming indexes 100.00 0.00 0 0.00 bdd_alfresco alf_node_properties 254 GB scanning heap 40.07 0.00 0 0.21 bdd_alfresco alf_node_properties 254 GB scanning heap 40.10 0.00 0 0.21 bdd_alfresco alf_node_properties 254 GB scanning heap 40.13 0.00 0 0.21 bdd_alfresco alf_node_properties 254 GB scanning heap 40.15 0.00 0 0.21 bdd_alfresco alf_node_properties 254 GB scanning heap 40.17 0.00 0 0.21 ... 
bdd_alfresco alf_node_properties 254 GB scanning heap 47.10 0.00 0 0.47 bdd_alfresco alf_prop_unique_ctx 1792 kB cleaning up indexes 100.00 100.00 0 0.00 bdd_alfresco alf_node_properties 254 GB scanning heap 47.13 0.00 0 0.47 bdd_alfresco alf_prop_unique_ctx 1792 kB cleaning up indexes 100.00 100.00 0 0.00 bdd_alfresco alf_node_properties 254 GB scanning heap 47.16 0.00 0 0.47 bdd_alfresco alf_prop_unique_ctx 1792 kB cleaning up indexes 100.00 100.00 0 0.00 bdd_alfresco alf_node_properties 254 GB scanning heap 47.18 0.00 0 0.48 bdd_alfresco alf_node_properties 254 GB scanning heap 47.21 0.00 0 0.48 ... bdd_alfresco alf_node_properties 255 GB vacuuming indexes 100.00 0.00 0 30.18 bdd_alfresco alf_node_properties 255 GB vacuuming indexes 100.00 0.00 0 30.18 bdd_alfresco alf_node_properties 255 GB vacuuming indexes 100.00 0.00 0 30.18 bdd_alfresco alf_node_properties 255 GB vacuuming indexes 100.00 0.00 0 30.18 ... ``` Information shown depends on the progress views. More informations on pgwaitevent -------------------------------- The pgwaitevent tool waits the execution of a query on a specific PID backend. It then gathers all the waiting events, and sums them up. At the end of the query, it prints a table with the waiting events, their occurences, and percentage. 
Here is an exemple of a session with this tool: ``` $ ./pgwaitevent -i 0.1 548292 Tracing wait events for PID 548292, sampling at 0.100s New query: truncate t1; Query duration: 00:00:00.324883 Trace duration: 00:00:00.313353 ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ [Running] │ │ 3 │ 100.00 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ New query: insert into t1 select generate_series(1, 1000000); Query duration: 00:00:02.077609 Trace duration: 00:00:02.038534 ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ WALSync │ IO │ 12 │ 60.00 │ │ [Running] │ │ 5 │ 25.00 │ │ WALWriteLock │ LWLock │ 3 │ 15.00 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ New query: select * from t1 where id<5000; Query duration: 00:00:00.207713 Trace duration: 00:00:00.108132 ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ [Running] │ │ 1 │ 100.00 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ New query: select * from t1 where id<500000; Query duration: 00:00:00.357929 Trace duration: 00:00:00.312559 ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ [Running] │ │ 3 │ 100.00 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ New query: insert into t1 select generate_series(1, 1000000); Query duration: 00:00:01.908082 Trace duration: 00:00:01.8308 
┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ WALWriteLock │ LWLock │ 6 │ 33.33 │ │ [Running] │ │ 5 │ 27.78 │ │ WALSync │ IO │ 4 │ 22.22 │ │ WALWrite │ IO │ 2 │ 11.11 │ │ DataFileExtend │ IO │ 1 │ 5.56 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ New query: insert into t1 select generate_series(1, 1000000); Query duration: 00:00:01.602976 Trace duration: 00:00:01.524851 ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ WALSync │ IO │ 7 │ 46.67 │ │ [Running] │ │ 4 │ 26.67 │ │ WALWriteLock │ LWLock │ 3 │ 20.00 │ │ WALWrite │ IO │ 1 │ 6.67 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ New query: insert into t1 select generate_series(1, 1000000); Query duration: 00:00:01.638675 Trace duration: 00:00:01.630696 ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ [Running] │ │ 8 │ 50.00 │ │ WALWriteLock │ LWLock │ 4 │ 25.00 │ │ WALSync │ IO │ 4 │ 25.00 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ New query: select * from t1 where id<500000; Query duration: 00:00:00.893073 Trace duration: 00:00:00.819036 ┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ [Running] │ │ 8 │ 100.00 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ New query: create index on t1(id); Query duration: 00:00:04.051142 Trace duration: 00:00:03.955806 
┌───────────────────────────────────┬───────────┬────────────┬─────────┐ │ Wait event │ WE type │ Occurences │ Percent │ ├───────────────────────────────────┼───────────┼────────────┼─────────┤ │ [Running] │ │ 15 │ 38.46 │ │ WALSync │ IO │ 15 │ 38.46 │ │ DataFileImmediateSync │ IO │ 5 │ 12.82 │ │ WALWriteLock │ LWLock │ 4 │ 10.26 │ └───────────────────────────────────┴───────────┴────────────┴─────────┘ No more session with PID 548292, exiting... ``` It sleeps 100msec before checking if a new query is being executed. It checks waiting events on an interval set up with the `-i` command line option. By default, it's 1 second (which is a bit on the high end). Starting with PostgreSQL 13, pgwaitevent is able to include leader and workers. You need the -g command line option for this. Ideas ----- * pgstat * pg_stat_archiver: display the current wal and the last archived * pg_stat_archiver: display the duration since the last archived wal * pg_stat_X_tables: display the duration since the last vacuum and analyze * sum the number of archived wal files * pgcsvstat * add export of pg_stat_walreceiver * add export of progress views pgstats-REL1_2_0/pgcsvstat.c000066400000000000000000000721301406310430400160640ustar00rootroot00000000000000/* * pgcsvstat, a PostgreSQL app to gather statistical informations * from a PostgreSQL database. * * This software is released under the PostgreSQL Licence. * * Guillaume Lelarge, guillaume@lelarge.info, 2011-2021. 
* * pgstats/pgcsvstat.c */ /* * Headers */ #include "postgres_fe.h" #include "common/string.h" #include #include #include #ifdef HAVE_GETOPT_H #include #endif extern char *optarg; #include "libpq-fe.h" /* * Defines */ #define PGCSVSTAT_VERSION "1.2.0" /* these are the opts structures for command line params */ struct options { bool quiet; bool nodb; char *directory; char *dbname; char *hostname; char *port; char *username; int major; int minor; }; /* global variables */ struct options *opts; PGconn *conn; /* function prototypes */ static void help(const char *progname); void get_opts(int, char **); void *myalloc(size_t size); char *mystrdup(const char *str); PGconn *sql_conn(void); int sql_exec(const char *sql, const char *filename, bool quiet); void sql_exec_dump_pgstatactivity(void); void sql_exec_dump_pgstatarchiver(void); void sql_exec_dump_pgstatbgwriter(void); void sql_exec_dump_pgstatdatabase(void); void sql_exec_dump_pgstatdatabaseconflicts(void); void sql_exec_dump_pgstatreplication(void); void sql_exec_dump_pgstatreplicationslots(void); void sql_exec_dump_pgstatslru(void); void sql_exec_dump_pgstatsubscription(void); void sql_exec_dump_pgstatwal(void); void sql_exec_dump_pgstatalltables(void); void sql_exec_dump_pgstatallindexes(void); void sql_exec_dump_pgstatioalltables(void); void sql_exec_dump_pgstatioallindexes(void); void sql_exec_dump_pgstatioallsequences(void); void sql_exec_dump_pgstatuserfunctions(void); void sql_exec_dump_pgclass_size(void); void sql_exec_dump_pgstatstatements(void); void sql_exec_dump_xlog_stat(void); void fetch_version(void); bool check_superuser(void); bool backend_minimum_version(int major, int minor); bool backend_has_pgstatstatements(void); /* function to parse command line options and check for some usage errors. 
*/ void get_opts(int argc, char **argv) { int c; const char *progname; progname = get_progname(argv[0]); /* set the defaults */ opts->quiet = false; opts->nodb = false; opts->directory = NULL; opts->dbname = NULL; opts->hostname = NULL; opts->port = NULL; opts->username = NULL; if (argc > 1) { if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) { help(progname); exit(0); } if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) { puts("pgcsvstats " PGCSVSTAT_VERSION " (compiled with PostgreSQL " PG_VERSION ")"); exit(0); } } /* get opts */ while ((c = getopt(argc, argv, "h:p:U:d:D:q")) != -1) { switch (c) { /* specify the database */ case 'd': opts->dbname = mystrdup(optarg); break; /* specify the directory */ case 'D': opts->directory = mystrdup(optarg); break; /* don't show headers */ case 'q': opts->quiet = true; break; /* host to connect to */ case 'h': opts->hostname = mystrdup(optarg); break; /* port to connect to on remote host */ case 'p': opts->port = mystrdup(optarg); break; /* username */ case 'U': opts->username = mystrdup(optarg); break; default: fprintf(stderr, "Try \"%s --help\" for more information.\n", progname); exit(1); } } } static void help(const char *progname) { printf("%s gathers statistics from a PostgreSQL database.\n\n" "Usage:\n" " %s [OPTIONS]...\n" "\nGeneral options:\n" " -d DBNAME database to connect to\n" " -D DIRECTORY directory for stats files (defaults to current)\n" " -q quiet\n" " --help show this help, then exit\n" " --version output version information, then exit\n" "\nConnection options:\n" " -h HOSTNAME database server host or socket directory\n" " -p PORT database server port number\n" " -U USER connect as specified database user\n" "\nThe default action is to create CSV files for each report.\n\n" "Report bugs to .\n", progname, progname); } void * myalloc(size_t size) { void *ptr = malloc(size); if (!ptr) { fprintf(stderr, "out of memory"); exit(1); } return ptr; } char * mystrdup(const char 
*str) { char *result = strdup(str); if (!result) { fprintf(stderr, "out of memory"); exit(1); } return result; } /* establish connection with database. */ PGconn * sql_conn() { PGconn *my_conn; char *password = NULL; bool new_pass; #if PG_VERSION_NUM >= 90300 const char **keywords; const char **values; #else int size; char *dns; #endif char *message; /* * Start the connection. Loop until we have a password if requested by * backend. */ do { #if PG_VERSION_NUM >= 90300 /* * We don't need to check if the database name is actually a complete * connection string, PQconnectdbParams being smart enough to check * this itself. */ #define PARAMS_ARRAY_SIZE 8 keywords = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*keywords)); values = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*values)); keywords[0] = "host"; values[0] = opts->hostname, keywords[1] = "port"; values[1] = opts->port; keywords[2] = "user"; values[2] = opts->username; keywords[3] = "password"; values[3] = password; keywords[4] = "dbname"; values[4] = opts->dbname; keywords[5] = "fallback_application_name"; values[5] = "pgcsvstat"; keywords[7] = NULL; values[7] = NULL; my_conn = PQconnectdbParams(keywords, values, true); #else /* 34 is the length of the fallback application name setting */ size = 34; if (opts->hostname) size += strlen(opts->hostname) + 6; if (opts->port) size += strlen(opts->port) + 6; if (opts->username) size += strlen(opts->username) + 6; if (opts->dbname) size += strlen(opts->dbname) + 8; dns = pg_malloc(size); /* * Checking the presence of a = sign is our way to check that the * database name is actually a connection string. In such a case, we * keep this string as the connection string, and add other parameters * if they are supplied. 
*/ sprintf(dns, "%s", "fallback_application_name='pgcsvstat' "); if (strchr(opts->dbname, '=') != NULL) sprintf(dns, "%s%s", dns, opts->dbname); else if (opts->dbname) sprintf(dns, "%sdbname=%s ", dns, opts->dbname); if (opts->hostname) sprintf(dns, "%shost=%s ", dns, opts->hostname); if (opts->port) sprintf(dns, "%sport=%s ", dns, opts->port); if (opts->username) sprintf(dns, "%suser=%s ", dns, opts->username); if (opts->verbose) printf("Connection string: %s\n", dns); my_conn = PQconnectdb(dns); #endif new_pass = false; if (!my_conn) { errx(1, "could not connect to database %s\n", opts->dbname); } #if PG_VERSION_NUM >= 80200 if (PQstatus(my_conn) == CONNECTION_BAD && PQconnectionNeedsPassword(my_conn) && !password) { PQfinish(my_conn); #if PG_VERSION_NUM < 100000 password = simple_prompt("Password: ", 100, false); #elif PG_VERSION_NUM < 140000 simple_prompt("Password: ", password, 100, false); #else password = simple_prompt("Password: ", false); #endif new_pass = true; } #endif } while (new_pass); if (password) free(password); /* check to see that the backend connection was successfully made */ if (PQstatus(my_conn) == CONNECTION_BAD) { message = PQerrorMessage(my_conn); PQfinish(my_conn); errx(1, "could not connect to database %s: %s", opts->dbname, message); } /* return the conn if good */ return my_conn; } /* * Actual code to make call to the database and print the output data. 
*/ int sql_exec(const char *query, const char* filename, bool quiet) { PGresult *res; FILE *fdcsv; struct stat st; int nfields; int nrows; int i, j; int size; /* open the csv file */ fdcsv = fopen(filename, "a"); if (!fdcsv) { fprintf(stderr, "pgcsvstat: fopen failed: %d\n", errno); fprintf(stderr, "pgcsvstat: filename was: %s\n", filename); PQfinish(conn); exit(-1); } /* get size of file */ stat(filename, &st); size = st.st_size; /* make the call */ res = PQexec(conn, query); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { fprintf(stderr, "pgcsvstat: query failed: %s\n", PQerrorMessage(conn)); fprintf(stderr, "pgcsvstat: query was: %s\n", query); PQclear(res); PQfinish(conn); exit(-1); } /* get the number of fields */ nrows = PQntuples(res); nfields = PQnfields(res); /* print a header */ if (!quiet && size == 0) { for (j = 0; j < nfields; j++) { fprintf(fdcsv, "%s", PQfname(res, j)); if (j < nfields - 1) fprintf(fdcsv, ";"); } fprintf(fdcsv, "\n"); } /* for each row, dump the information */ for (i = 0; i < nrows; i++) { for (j = 0; j < nfields; j++) { fprintf(fdcsv, "%s", PQgetvalue(res, i, j)); if (j < nfields - 1) fprintf(fdcsv, ";"); } fprintf(fdcsv, "\n"); } /* cleanup */ PQclear(res); /* close the csv file */ fclose(fdcsv); return 0; } /* * Dump all activities. */ void sql_exec_dump_pgstatactivity() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), datid, datname, %s, %s" "usesysid, usename, %s%s%s%s%s" "date_trunc('seconds', query_start) AS query_start, " "%s%s%s%s%s%s%s state " "FROM pg_stat_activity " "ORDER BY %s", backend_minimum_version(9, 2) ? "pid" : "procpid", backend_minimum_version(13, 0) ? "leader_pid, " : "", backend_minimum_version(9, 0) ? "application_name, " : "", backend_minimum_version(8, 1) ? "client_addr, " : "", backend_minimum_version(9, 1) ? 
"client_hostname, " : "", backend_minimum_version(8, 1) ? "client_port, date_trunc('seconds', backend_start) AS backend_start, " : "", backend_minimum_version(8, 3) ? "date_trunc('seconds', xact_start) AS xact_start, " : "", backend_minimum_version(9, 2) ? "state_change, " : "", backend_minimum_version(9, 6) ? "wait_event_type, wait_event, " : backend_minimum_version(8, 2) ? "waiting, " : "", backend_minimum_version(9, 4) ? "backend_xid, " : "", backend_minimum_version(9, 4) ? "backend_xmin, " : "", backend_minimum_version(14, 0) ? "query_id, " : "", backend_minimum_version(9, 2) ? "query, " : "current_query,", backend_minimum_version(10, 0) ? "backend_type, " : "", backend_minimum_version(9, 2) ? "pid" : "procpid"); // the last one is for the ORDER BY snprintf(filename, sizeof(filename), "%s/pg_stat_activity.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all bgwriter stats. */ void sql_exec_dump_pgstatbgwriter() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), checkpoints_timed, " "checkpoints_req, %sbuffers_checkpoint, buffers_clean, " "maxwritten_clean, buffers_backend, %sbuffers_alloc%s " "FROM pg_stat_bgwriter ", backend_minimum_version(9, 2) ? "checkpoint_write_time, checkpoint_sync_time, " : "", backend_minimum_version(9, 1) ? "buffers_backend_fsync, " : "", backend_minimum_version(9, 1) ? ", date_trunc('seconds', stats_reset) AS stats_reset " : ""); snprintf(filename, sizeof(filename), "%s/pg_stat_bgwriter.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all archiver stats. 
*/ void sql_exec_dump_pgstatarchiver() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), archived_count, " "last_archived_wal, date_trunc('seconds', last_archived_time) AS last_archived_time, " "failed_count, " "last_failed_wal, date_trunc('seconds', last_failed_time) AS last_failed_time, " "date_trunc('seconds', stats_reset) AS stats_reset " "FROM pg_stat_archiver "); snprintf(filename, sizeof(filename), "%s/pg_stat_archiver.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all databases stats. * to be fixed wrt v14 */ void sql_exec_dump_pgstatdatabase() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), datid, datname, " "numbackends, xact_commit, xact_rollback, blks_read, blks_hit" "%s%s%s%s%s " "FROM pg_stat_database " "ORDER BY datname", backend_minimum_version(8, 3) ? ", tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted" : "", backend_minimum_version(9, 1) ? ", conflicts, date_trunc('seconds', stats_reset) AS stats_reset" : "", backend_minimum_version(9, 2) ? ", temp_files, temp_bytes, deadlocks, blk_read_time, blk_write_time" : "", backend_minimum_version(12, 0) ? ", checksum_failures, checksum_last_failure" : "", backend_minimum_version(14, 0) ? ", session_time, active_time, idle_in_transaction_time, sessions, sessions_abandoned, sessions_fatal, sessions_killed" : ""); snprintf(filename, sizeof(filename), "%s/pg_stat_database.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all databases conflicts stats. 
*/ void sql_exec_dump_pgstatdatabaseconflicts() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), * " "FROM pg_stat_database_conflicts " "ORDER BY datname"); snprintf(filename, sizeof(filename), "%s/pg_stat_database_conflicts.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all replication stats. */ void sql_exec_dump_pgstatreplication() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), %s, usesysid, usename, " "application_name, client_addr, client_hostname, client_port, " "date_trunc('seconds', backend_start) AS backend_start, %sstate, " "%s AS master_location, %s%s" "sync_priority, " "sync_state%s " "FROM pg_stat_replication " "ORDER BY application_name", backend_minimum_version(9, 2) ? "pid" : "procpid", backend_minimum_version(9, 4) ? "backend_xmin, " : "", backend_minimum_version(10, 0) ? "pg_current_wal_lsn()" : "pg_current_xlog_location()", backend_minimum_version(10, 0) ? "sent_lsn, write_lsn, flush_lsn, replay_lsn, " : "sent_location, write_location, flush_location, replay_location, ", backend_minimum_version(10, 0) ? "write_lag, flush_lag, replay_lag, " : "", backend_minimum_version(12, 0) ? ", reply_time" : ""); snprintf(filename, sizeof(filename), "%s/pg_stat_replication.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all replication slots stats. 
*/ void sql_exec_dump_pgstatreplicationslots() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), slot_name, " "spill_txns, spill_count, spill_bytes, " "stream_txns, stream_count, stream_bytes, " "total_txns, total_bytes, " "date_trunc('seconds', stats_reset) AS stats_reset " "FROM pg_stat_replication_slots " "ORDER BY slot_name"); snprintf(filename, sizeof(filename), "%s/pg_stat_replication_slots.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all SLRU stats. */ void sql_exec_dump_pgstatslru() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), name, " "blks_zeroed, blks_hit, blks_read, blks_written, blks_exists, " "flushes, truncates, " "date_trunc('seconds', stats_reset) AS stats_reset " "FROM pg_stat_slru " "ORDER BY name"); snprintf(filename, sizeof(filename), "%s/pg_stat_slru.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all subscriptions stats. */ void sql_exec_dump_pgstatsubscription() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), subid, subname, " "pid, relid, received_lsn, " "date_trunc('seconds', last_msg_send_time) AS last_msg_send_time, " "date_trunc('seconds', last_msg_receipt_time) AS last_msg_receipt_time, " "latest_end_lsn, date_trunc('seconds', latest_end_time) AS latest_end_time " "FROM pg_stat_subscription " "ORDER BY subid"); snprintf(filename, sizeof(filename), "%s/pg_stat_subscription.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all WAL stats. 
*/ void sql_exec_dump_pgstatwal() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), " "wal_records, wal_fpi, wal_bytes, wal_buffers_full, wal_write, " "wal_sync, wal_write_time, wal_sync_time, " "date_trunc('seconds', stats_reset) AS stats_reset " "FROM pg_stat_wal"); snprintf(filename, sizeof(filename), "%s/pg_stat_wal.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all tables stats. */ void sql_exec_dump_pgstatalltables() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), relid, schemaname, relname, " "seq_scan, seq_tup_read, idx_scan, idx_tup_fetch, n_tup_ins, " "n_tup_upd, n_tup_del" "%s" "%s" "%s" "%s" "%s" " FROM pg_stat_all_tables " "WHERE schemaname <> 'information_schema' " "ORDER BY schemaname, relname", backend_minimum_version(8, 3) ? ", n_tup_hot_upd, n_live_tup, n_dead_tup" : "", backend_minimum_version(9, 4) ? ", n_mod_since_analyze" : "", backend_minimum_version(13, 0) ? ", n_ins_since_vacuum" : "", backend_minimum_version(8, 2) ? ", date_trunc('seconds', last_vacuum) AS last_vacuum, date_trunc('seconds', last_autovacuum) AS last_autovacuum, date_trunc('seconds',last_analyze) AS last_analyze, date_trunc('seconds',last_autoanalyze) AS last_autoanalyze" : "", backend_minimum_version(9, 1) ? ", vacuum_count, autovacuum_count, analyze_count, autoanalyze_count" : ""); snprintf(filename, sizeof(filename), "%s/pg_stat_all_tables.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all indexes stats. 
*/ void sql_exec_dump_pgstatallindexes() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), * " "FROM pg_stat_all_indexes " "WHERE schemaname <> 'information_schema' " "ORDER BY schemaname, relname"); snprintf(filename, sizeof(filename), "%s/pg_stat_all_indexes.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all tables IO stats. */ void sql_exec_dump_pgstatioalltables() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), * " "FROM pg_statio_all_tables " "WHERE schemaname <> 'information_schema' " "ORDER BY schemaname, relname"); snprintf(filename, sizeof(filename), "%s/pg_statio_all_tables.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all indexes IO stats. */ void sql_exec_dump_pgstatioallindexes() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), * " "FROM pg_statio_all_indexes " "WHERE schemaname <> 'information_schema' " "ORDER BY schemaname, relname"); snprintf(filename, sizeof(filename), "%s/pg_statio_all_indexes.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all sequences IO stats. */ void sql_exec_dump_pgstatioallsequences() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), * " "FROM pg_statio_all_sequences " "WHERE schemaname <> 'information_schema' " "ORDER BY schemaname, relname"); snprintf(filename, sizeof(filename), "%s/pg_statio_all_sequences.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all functions stats. 
*/ void sql_exec_dump_pgstatuserfunctions() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), * " "FROM pg_stat_user_functions " "WHERE schemaname <> 'information_schema' " "ORDER BY schemaname, funcname"); snprintf(filename, sizeof(filename), "%s/pg_stat_user_functions.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all size class stats. */ void sql_exec_dump_pgclass_size() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), n.nspname, c.relname, c.relkind, c.reltuples, c.relpages%s " "FROM pg_class c, pg_namespace n " "WHERE n.oid=c.relnamespace AND n.nspname <> 'information_schema' " "ORDER BY n.nspname, c.relname", backend_minimum_version(8, 1) ? ", pg_relation_size(c.oid)" : ""); snprintf(filename, sizeof(filename), "%s/pg_class_size.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all statements stats. * to be fixed wrt v14 */ void sql_exec_dump_pgstatstatements() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT date_trunc('seconds', now()), r.rolname, d.datname, " "%sregexp_replace(query, E'\n', ' ', 'g') as query, %scalls, %s, rows, " "shared_blks_hit, shared_blks_read, shared_blks_written, " "local_blks_hit, local_blks_read, local_blks_written, " "temp_blks_read, temp_blks_written%s " "FROM pg_stat_statements q, pg_database d, pg_roles r " "WHERE q.userid=r.oid and q.dbid=d.oid " "ORDER BY r.rolname, d.datname", backend_minimum_version(14, 0) ? "toplevel, queryid, " : "", backend_minimum_version(13, 0) ? "plans, total_plan_time, min_plan_time, max_plan_time, mean_plan_time, stddev_plan_time, " : "", backend_minimum_version(13, 0) ? 
"total_exec_time, min_exec_time, max_exec_time, mean_exec_time, stddev_exec_time" : "total_time", backend_minimum_version(14, 0) ? ", wal_records, wal_fpi, wal_bytes" : ""); snprintf(filename, sizeof(filename), "%s/pg_stat_statements.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Dump all xlog stats. */ void sql_exec_dump_xlog_stat() { char query[1024]; char filename[1024]; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), backend_minimum_version(10, 0) ? "SELECT date_trunc('seconds', now()), pg_walfile_name(pg_current_wal_lsn())=pg_ls_dir AS current, pg_ls_dir AS filename, " "(SELECT modification FROM pg_stat_file('pg_wal/'||pg_ls_dir)) AS modification_timestamp " "FROM pg_ls_dir('pg_wal') " "WHERE pg_ls_dir ~ E'^[0-9A-F]{24}' " "ORDER BY pg_ls_dir" : "SELECT date_trunc('seconds', now()), pg_xlogfile_name(pg_current_xlog_location())=pg_ls_dir AS current, pg_ls_dir AS filename, " "(SELECT modification FROM pg_stat_file('pg_xlog/'||pg_ls_dir)) AS modification_timestamp " "FROM pg_ls_dir('pg_xlog') " "WHERE pg_ls_dir ~ E'^[0-9A-F]{24}' " "ORDER BY pg_ls_dir"); snprintf(filename, sizeof(filename), "%s/pg_xlog_stat.csv", opts->directory); sql_exec(query, filename, opts->quiet); } /* * Fetch PostgreSQL major and minor numbers */ void fetch_version() { char query[1024]; PGresult *res; /* get the oid and database name from the system pg_database table */ snprintf(query, sizeof(query), "SELECT version()"); /* make the call */ res = PQexec(conn, query); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { fprintf(stderr, "pgcsvstat: query failed: %s\n", PQerrorMessage(conn)); fprintf(stderr, "pgcsvstat: query was: %s\n", query); PQclear(res); PQfinish(conn); exit(-1); } /* get the only row, and parse it to get major and minor numbers */ sscanf(PQgetvalue(res, 0, 0), "%*s %d.%d", &(opts->major), &(opts->minor)); /* print version */ if (!opts->quiet) printf("Detected release: 
%d.%d\n", opts->major, opts->minor); /* cleanup */ PQclear(res); } /* * Check if user has the superuser attribute */ bool check_superuser() { PGresult *res; char sql[1024]; bool is_superuser = false; /* get the oid and database name from the system pg_database table */ snprintf(sql, sizeof(sql), "SELECT rolsuper FROM pg_roles WHERE rolname=current_user "); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { fprintf(stderr, "pgcsvstat: query failed: %s\n", PQerrorMessage(conn)); fprintf(stderr, "pgcsvstat: query was: %s\n", sql); PQclear(res); PQfinish(conn); exit(-1); } /* get the information */ is_superuser = strncmp(PQgetvalue(res, 0, 0), "t", 1) == 0; /* cleanup */ PQclear(res); return is_superuser; } /* * Compare given major and minor numbers to the one of the connected server */ bool backend_minimum_version(int major, int minor) { return opts->major > major || (opts->major == major && opts->minor >= minor); } /* * Check if backend has the pg_stat_statements view */ bool backend_has_pgstatstatements() { PGresult *res; char sql[1024]; bool has_pgstatstatements = false; /* get the oid and database name from the system pg_database table */ snprintf(sql, sizeof(sql), "SELECT 1 " "FROM pg_proc p, pg_namespace n " "WHERE p.proname='pg_stat_statements' " " AND p.pronamespace=n.oid"); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { fprintf(stderr, "pgcsvstat: query failed: %s\n", PQerrorMessage(conn)); fprintf(stderr, "pgcsvstat: query was: %s\n", sql); PQclear(res); PQfinish(conn); exit(-1); } /* get the information */ has_pgstatstatements = PQntuples(res)>0; /* cleanup */ PQclear(res); return has_pgstatstatements; } int main(int argc, char **argv) { bool is_superuser = false; opts = (struct options *) myalloc(sizeof(struct options)); /* parse the opts */ get_opts(argc, argv); if (opts->dbname == NULL) { opts->dbname = "postgres"; 
opts->nodb = true; } if (opts->directory == NULL) { opts->directory = "./"; } /* connect to the database */ conn = sql_conn(); /* get version */ fetch_version(); /* check superuser attribute */ is_superuser = check_superuser(); /* grab cluster stats info */ sql_exec_dump_pgstatactivity(); if (backend_minimum_version(9, 4)) sql_exec_dump_pgstatarchiver(); if (backend_minimum_version(8, 3)) sql_exec_dump_pgstatbgwriter(); sql_exec_dump_pgstatdatabase(); if (backend_minimum_version(9, 1)) { sql_exec_dump_pgstatdatabaseconflicts(); sql_exec_dump_pgstatreplication(); } if (backend_minimum_version(14, 0)) sql_exec_dump_pgstatreplicationslots(); if (backend_minimum_version(13, 0)) sql_exec_dump_pgstatslru(); if (backend_minimum_version(10, 0)) sql_exec_dump_pgstatsubscription(); if (backend_minimum_version(14, 0)) sql_exec_dump_pgstatwal(); /* TODO pg_stat_wal_receiver */ /* grab database stats info */ sql_exec_dump_pgstatalltables(); sql_exec_dump_pgstatallindexes(); sql_exec_dump_pgstatioalltables(); sql_exec_dump_pgstatioallindexes(); sql_exec_dump_pgstatioallsequences(); if (backend_minimum_version(8, 4)) sql_exec_dump_pgstatuserfunctions(); /* grab progress stats info */ /* TODO */ /* grab other informations */ sql_exec_dump_pgclass_size(); if (backend_has_pgstatstatements()) sql_exec_dump_pgstatstatements(); if (backend_minimum_version(8, 2) && is_superuser) sql_exec_dump_xlog_stat(); PQfinish(conn); return 0; } pgstats-REL1_2_0/pgdisplay.c000066400000000000000000000303321406310430400160400ustar00rootroot00000000000000/* * pgdisplay, a PostgreSQL app to display a table * in an informative way. * * This software is released under the PostgreSQL Licence. * * Guillaume Lelarge, guillaume@lelarge.info, 2015-2021. 
* * pgstat/pgdisplay.c */ /* * Headers */ #include "postgres_fe.h" #include "common/string.h" #include #include #include #include #include #ifdef HAVE_GETOPT_H #include #endif #include "libpq-fe.h" #include "libpq/pqsignal.h" /* * Defines */ #define PGDISPLAY_VERSION "0.0.1" #define PGSTAT_DEFAULT_STRING_SIZE 1024 #define couleur(param) printf("\033[48;2;255;%d;%dm",param,param) #define nocouleur() printf("\033[0m") /* these are the options structure for command line parameters */ struct options { /* misc */ bool verbose; char *table; int groups; int blocksize; /* connection parameters */ char *dbname; char *hostname; char *port; char *username; /* version number */ int major; int minor; }; /* * Global variables */ PGconn *conn; struct options *opts; extern char *optarg; /* * Function prototypes */ static void help(const char *progname); void get_opts(int, char **); #ifndef FE_MEMUTILS_H void *pg_malloc(size_t size); char *pg_strdup(const char *in); #endif PGconn *sql_conn(void); void display_fsm(char *table); void fetch_version(void); void fetch_blocksize(void); bool backend_minimum_version(int major, int minor); void allocate_struct(void); static void quit_properly(SIGNAL_ARGS); /* * Print help message */ static void help(const char *progname) { printf("%s displays table in an informative way.\n\n" "Usage:\n" " %s [OPTIONS] [delay [count]]\n" "\nGeneral options:\n" " -G GROUPS # of groups of blocks\n" " -t TABLE table to display\n" " -v verbose\n" " -?|--help show this help, then exit\n" " -V|--version output version information, then exit\n" "\nConnection options:\n" " -h HOSTNAME database server host or socket directory\n" " -p PORT database server port number\n" " -U USER connect as specified database user\n" " -d DBNAME database to connect to\n" "Report bugs to .\n", progname, progname); } /* * Parse command line options and check for some usage errors */ void get_opts(int argc, char **argv) { int c; const char *progname; progname = get_progname(argv[0]); /* 
set the defaults */ opts->verbose = false; opts->groups = 20; opts->blocksize = 0; opts->table = NULL; opts->dbname = NULL; opts->hostname = NULL; opts->port = NULL; opts->username = NULL; if (argc > 1) { if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) { help(progname); exit(0); } if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) { puts("pgdisplay " PGDISPLAY_VERSION " (compiled with PostgreSQL " PG_VERSION ")"); exit(0); } } /* get opts */ while ((c = getopt(argc, argv, "h:p:U:d:t:G:v")) != -1) { switch (c) { /* specify the # of groups */ case 'G': opts->groups = atoi(optarg); break; /* specify the table */ case 't': opts->table = pg_strdup(optarg); break; /* don't show headers */ case 'v': opts->verbose = true; break; /* specify the database */ case 'd': opts->dbname = pg_strdup(optarg); break; /* host to connect to */ case 'h': opts->hostname = pg_strdup(optarg); break; /* port to connect to on remote host */ case 'p': opts->port = pg_strdup(optarg); break; /* username */ case 'U': opts->username = pg_strdup(optarg); break; default: errx(1, "Try \"%s --help\" for more information.\n", progname); } } if (opts->table == NULL) { fprintf(stderr, "missing table name\n"); exit(EXIT_FAILURE); } if (opts->dbname == NULL) { /* * We want to use dbname for possible error reports later, * and in case someone has set and is using PGDATABASE * in its environment preserve that name for later usage */ if (!getenv("PGDATABASE")) opts->dbname = "postgres"; else opts->dbname = getenv("PGDATABASE"); } } #ifndef FE_MEMUTILS_H /* * "Safe" wrapper around malloc(). */ void * pg_malloc(size_t size) { void *tmp; /* Avoid unportable behavior of malloc(0) */ if (size == 0) size = 1; tmp = malloc(size); if (!tmp) { fprintf(stderr, "out of memory\n"); exit(EXIT_FAILURE); } return tmp; } /* * "Safe" wrapper around strdup(). 
*/ char * pg_strdup(const char *in) { char *tmp; if (!in) { fprintf(stderr, "cannot duplicate null pointer (internal error)\n"); exit(EXIT_FAILURE); } tmp = strdup(in); if (!tmp) { fprintf(stderr, "out of memory\n"); exit(EXIT_FAILURE); } return tmp; } #endif /* * Establish the PostgreSQL connection */ PGconn * sql_conn() { PGconn *my_conn; char *password = NULL; bool new_pass; #if PG_VERSION_NUM >= 90300 const char **keywords; const char **values; #else int size; char *dns; #endif /* * Start the connection. Loop until we have a password if requested by * backend. */ do { #if PG_VERSION_NUM >= 90300 /* * We don't need to check if the database name is actually a complete * connection string, PQconnectdbParams being smart enough to check * this itself. */ #define PARAMS_ARRAY_SIZE 8 keywords = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*keywords)); values = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*values)); keywords[0] = "host"; values[0] = opts->hostname, keywords[1] = "port"; values[1] = opts->port; keywords[2] = "user"; values[2] = opts->username; keywords[3] = "password"; values[3] = password; keywords[4] = "dbname"; values[4] = opts->dbname; keywords[5] = "fallback_application_name"; values[5] = "pgdisplay"; keywords[7] = NULL; values[7] = NULL; my_conn = PQconnectdbParams(keywords, values, true); #else /* 34 is the length of the fallback application name setting */ size = 34; if (opts->hostname) size += strlen(opts->hostname) + 6; if (opts->port) size += strlen(opts->port) + 6; if (opts->username) size += strlen(opts->username) + 6; if (opts->dbname) size += strlen(opts->dbname) + 8; dns = pg_malloc(size); /* * Checking the presence of a = sign is our way to check that the * database name is actually a connection string. In such a case, we * keep this string as the connection string, and add other parameters * if they are supplied. 
*/ sprintf(dns, "%s", "fallback_application_name='pgdisplay' "); if (strchr(opts->dbname, '=') != NULL) sprintf(dns, "%s%s", dns, opts->dbname); else if (opts->dbname) sprintf(dns, "%sdbname=%s ", dns, opts->dbname); if (opts->hostname) sprintf(dns, "%shost=%s ", dns, opts->hostname); if (opts->port) sprintf(dns, "%sport=%s ", dns, opts->port); if (opts->username) sprintf(dns, "%suser=%s ", dns, opts->username); if (opts->verbose) printf("Connection string: %s\n", dns); my_conn = PQconnectdb(dns); #endif new_pass = false; if (!my_conn) { errx(1, "could not connect to database %s\n", opts->dbname); } #if PG_VERSION_NUM >= 80200 if (PQstatus(my_conn) == CONNECTION_BAD && PQconnectionNeedsPassword(my_conn) && !password) { PQfinish(my_conn); #if PG_VERSION_NUM < 100000 password = simple_prompt("Password: ", 100, false); #elif PG_VERSION_NUM < 140000 simple_prompt("Password: ", password, 100, false); #else password = simple_prompt("Password: ", false); #endif new_pass = true; } #endif } while (new_pass); if (password) free(password); /* check to see that the backend connection was successfully made */ if (PQstatus(my_conn) == CONNECTION_BAD) { errx(1, "could not connect to database %s: %s", opts->dbname, PQerrorMessage(my_conn)); PQfinish(my_conn); } /* return the conn if good */ return my_conn; } /* * Dump all archiver stats. 
*/ void display_fsm(char *table) { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row; int color; int totalspace, freespace; int groupby, blocksize; int n; blocksize = 8192; /* grab the stats (this is the only stats on one line) */ /* snprintf(sql, sizeof(sql), "with fsm as (select blkno/443 as blockrange, sum(avail) as available, 8192*443 as total from pg_freespace('%s') group by 1)" "select blockrange, available, total, 100*available/total as ratio, 180*available/total as color from fsm order by 1", table); */ snprintf(sql, sizeof(sql), "select avail from pg_freespace('%s') order by blkno", table); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgdisplay: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgdisplay: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* initialize some vars */ totalspace = nrows*blocksize; if (nrows <= opts->groups) groupby = 1; else groupby = nrows/opts->groups; freespace = 0; n = 0; printf("Pages #: %d\n", nrows); printf("Table size: %d\n", totalspace); printf("... group of %d\n", groupby); printf("\n\n"); /* for each row, dump the information */ for (row = 0; row < nrows; row++) { /* getting new values */ freespace += atol(PQgetvalue(res, row, 0)); if (++n >= groupby) { //printf("Free space [%d] : %d (on %d)\n", n, freespace, groupby*blocksize); /* printing the diff... * note that the first line will be the current value, rather than the diff */ color = 180*freespace/(8192*groupby); if (color<0) color = 0; couleur(color); printf(" "); nocouleur(); freespace = 0; n = 0; } } printf("\n\n"); /* cleanup */ PQclear(res); } /* * Fetch block size. 
*/ void fetch_blocksize() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; /* get the cluster version */ snprintf(sql, sizeof(sql), "SELECT current_setting('block_size')"); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgdisplay: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgdisplay: query was: %s", sql); } /* get the only row, and parse it to get major and minor numbers */ opts->blocksize = atoi(PQgetvalue(res, 0, 0)); /* print version */ if (opts->verbose) printf("Detected block size: %d\n", opts->blocksize); /* cleanup */ PQclear(res); } /* * Fetch PostgreSQL major and minor numbers */ void fetch_version() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; /* get the cluster version */ snprintf(sql, sizeof(sql), "SELECT version()"); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgdisplay: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgdisplay: query was: %s", sql); } /* get the only row, and parse it to get major and minor numbers */ sscanf(PQgetvalue(res, 0, 0), "%*s %d.%d", &(opts->major), &(opts->minor)); /* print version */ if (opts->verbose) printf("Detected release: %d.%d\n", opts->major, opts->minor); /* cleanup */ PQclear(res); } /* * Compare given major and minor numbers to the one of the connected server */ bool backend_minimum_version(int major, int minor) { return opts->major > major || (opts->major == major && opts->minor >= minor); } /* * Close the PostgreSQL connection, and quit */ static void quit_properly(SIGNAL_ARGS) { PQfinish(conn); exit(1); } /* * Main function */ int main(int argc, char **argv) { /* * If the user stops the program (control-Z) and then resumes it, * print out the header again. 
*/ pqsignal(SIGINT, quit_properly); /* Allocate the options struct */ opts = (struct options *) pg_malloc(sizeof(struct options)); /* Parse the options */ get_opts(argc, argv); /* Connect to the database */ conn = sql_conn(); // check last vacuum timestamp // fetch blocks count fetch_blocksize(); display_fsm(opts->table); PQfinish(conn); return 0; } pgstats-REL1_2_0/pgreport.c000066400000000000000000000510101406310430400157020ustar00rootroot00000000000000/* * pgreport, a PostgreSQL app to get lots of informations from PostgreSQL * metadata and statistics. * * This software is released under the PostgreSQL Licence. * * Guillaume Lelarge, guillaume@lelarge.info, 2020-2021. * * pgstats/pgreport.c */ /* * Headers */ #include "postgres_fe.h" #include "common/string.h" #include #include #include #include #include #include #ifdef HAVE_GETOPT_H #include #endif #include "fe_utils/print.h" #include "libpq-fe.h" #include "libpq/pqsignal.h" #include "pgreport_queries.h" /* * Defines */ #define PGREPORT_VERSION "1.2.0" #define PGREPORT_DEFAULT_LINES 20 #define PGREPORT_DEFAULT_STRING_SIZE 2048 /* * Structs */ /* these are the options structure for command line parameters */ struct options { /* misc */ char *script; bool verbose; /* connection parameters */ char *dbname; char *hostname; char *port; char *username; /* version number */ int major; int minor; }; /* * Global variables */ PGconn *conn; struct options *opts; extern char *optarg; /* * Function prototypes */ static void help(const char *progname); void get_opts(int, char **); #ifndef FE_MEMUTILS_H void *pg_malloc(size_t size); char *pg_strdup(const char *in); #endif PGconn *sql_conn(void); bool backend_minimum_version(int major, int minor); void execute(char *query); void install_extension(char *extension); void fetch_version(void); void fetch_postmaster_reloadconftime(void); void fetch_postmaster_starttime(void); void fetch_table(char *label, char *query); void fetch_file(char *filename); void fetch_kernelconfig(char 
*cfg); void exec_command(char *cmd); static void quit_properly(SIGNAL_ARGS); /* * Print help message */ static void help(const char *progname) { printf("%s gets lots of informations from PostgreSQL metadata and statistics.\n\n" "Usage:\n" " %s [OPTIONS]\n" "\nGeneral options:\n" " -s VERSION generate SQL script for $VERSION release\n" " -v verbose\n" " -?|--help show this help, then exit\n" " -V|--version output version information, then exit\n" "\nConnection options:\n" " -h HOSTNAME database server host or socket directory\n" " -p PORT database server port number\n" " -U USER connect as specified database user\n" " -d DBNAME database to connect to\n\n" "Report bugs to .\n", progname, progname); } /* * Parse command line options and check for some usage errors */ void get_opts(int argc, char **argv) { int c; const char *progname; progname = get_progname(argv[0]); /* set the defaults */ opts->script = NULL; opts->verbose = false; opts->dbname = NULL; opts->hostname = NULL; opts->port = NULL; opts->username = NULL; /* we should deal quickly with help and version */ if (argc > 1) { if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) { help(progname); exit(0); } if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) { puts("pgreport " PGREPORT_VERSION " (compiled with PostgreSQL " PG_VERSION ")"); exit(0); } } /* get options */ while ((c = getopt(argc, argv, "h:p:U:d:vs:")) != -1) { switch (c) { /* specify the database */ case 'd': opts->dbname = pg_strdup(optarg); break; /* host to connect to */ case 'h': opts->hostname = pg_strdup(optarg); break; /* port to connect to on remote host */ case 'p': opts->port = pg_strdup(optarg); break; /* get script */ case 's': opts->script = pg_strdup(optarg); sscanf(opts->script, "%d.%d", &(opts->major), &(opts->minor)); break; /* username */ case 'U': opts->username = pg_strdup(optarg); break; /* get verbose */ case 'v': opts->verbose = true; break; default: errx(1, "Try \"%s --help\" for more 
information.\n", progname); } } /* set dbname if unset */ if (opts->dbname == NULL) { /* * We want to use dbname for possible error reports later, * and in case someone has set and is using PGDATABASE * in its environment, preserve that name for later usage */ if (!getenv("PGDATABASE")) opts->dbname = "postgres"; else opts->dbname = getenv("PGDATABASE"); } } #ifndef FE_MEMUTILS_H /* * "Safe" wrapper around malloc(). */ void * pg_malloc(size_t size) { void *tmp; /* Avoid unportable behavior of malloc(0) */ if (size == 0) size = 1; tmp = malloc(size); if (!tmp) { fprintf(stderr, "out of memory\n"); exit(EXIT_FAILURE); } return tmp; } /* * "Safe" wrapper around strdup(). */ char * pg_strdup(const char *in) { char *tmp; if (!in) { fprintf(stderr, "cannot duplicate null pointer (internal error)\n"); exit(EXIT_FAILURE); } tmp = strdup(in); if (!tmp) { fprintf(stderr, "out of memory\n"); exit(EXIT_FAILURE); } return tmp; } #endif /* * Establish the PostgreSQL connection */ PGconn * sql_conn() { PGconn *my_conn; char *password = NULL; bool new_pass; #if PG_VERSION_NUM >= 90300 const char **keywords; const char **values; #else int size; char *dns; #endif char *message; /* * Start the connection. Loop until we have a password if requested by * backend. */ do { #if PG_VERSION_NUM >= 90300 /* * We don't need to check if the database name is actually a complete * connection string, PQconnectdbParams being smart enough to check * this itself. 
*/ #define PARAMS_ARRAY_SIZE 8 keywords = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*keywords)); values = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*values)); keywords[0] = "host"; values[0] = opts->hostname, keywords[1] = "port"; values[1] = opts->port; keywords[2] = "user"; values[2] = opts->username; keywords[3] = "password"; values[3] = password; keywords[4] = "dbname"; values[4] = opts->dbname; keywords[5] = "fallback_application_name"; values[5] = "pgreport"; keywords[7] = NULL; values[7] = NULL; my_conn = PQconnectdbParams(keywords, values, true); #else /* 34 is the length of the fallback application name setting */ size = 34; if (opts->hostname) size += strlen(opts->hostname) + 6; if (opts->port) size += strlen(opts->port) + 6; if (opts->username) size += strlen(opts->username) + 6; if (opts->dbname) size += strlen(opts->dbname) + 8; dns = pg_malloc(size); /* * Checking the presence of a = sign is our way to check that the * database name is actually a connection string. In such a case, we * keep this string as the connection string, and add other parameters * if they are supplied. 
*/ sprintf(dns, "%s", "fallback_application_name='pgreport' "); if (strchr(opts->dbname, '=') != NULL) sprintf(dns, "%s%s", dns, opts->dbname); else if (opts->dbname) sprintf(dns, "%sdbname=%s ", dns, opts->dbname); if (opts->hostname) sprintf(dns, "%shost=%s ", dns, opts->hostname); if (opts->port) sprintf(dns, "%sport=%s ", dns, opts->port); if (opts->username) sprintf(dns, "%suser=%s ", dns, opts->username); if (opts->verbose) printf("Connection string: %s\n", dns); my_conn = PQconnectdb(dns); #endif new_pass = false; if (!my_conn) { errx(1, "could not connect to database %s\n", opts->dbname); } #if PG_VERSION_NUM >= 80200 if (PQstatus(my_conn) == CONNECTION_BAD && PQconnectionNeedsPassword(my_conn) && !password) { PQfinish(my_conn); #if PG_VERSION_NUM < 100000 password = simple_prompt("Password: ", 100, false); #elif PG_VERSION_NUM < 140000 simple_prompt("Password: ", password, 100, false); #else password = simple_prompt("Password: ", false); #endif new_pass = true; } #endif } while (new_pass); if (password) free(password); /* check to see that the backend connection was successfully made */ if (PQstatus(my_conn) == CONNECTION_BAD) { message = PQerrorMessage(my_conn); PQfinish(my_conn); errx(1, "could not connect to database %s: %s", opts->dbname, message); } /* return the conn if good */ return my_conn; } /* * Compare given major and minor numbers to the one of the connected server */ bool backend_minimum_version(int major, int minor) { return opts->major > major || (opts->major == major && opts->minor >= minor); } /* * Execute query */ void execute(char *query) { PGresult *results; if (opts->script) { printf("%s;\n", query); } else { /* make the call */ results = PQexec(conn, query); /* check and deal with errors */ if (!results) { warnx("pgreport: query failed: %s", PQerrorMessage(conn)); PQclear(results); PQfinish(conn); errx(1, "pgreport: query was: %s", query); } /* cleanup */ PQclear(results); } } /* * Install extension */ void install_extension(char 
*extension) { char check_sql[PGREPORT_DEFAULT_STRING_SIZE], install_sql[PGREPORT_DEFAULT_STRING_SIZE]; PGresult *check_res, *install_res; if (opts->script) { printf("CREATE EXTENSION IF NOT EXISTS %s;\n", extension); } else { /* check if extension is already installed */ snprintf(check_sql, sizeof(check_sql), "SELECT 1 " "FROM pg_available_extensions " "WHERE name='%s' " " AND installed_version IS NOT NULL", extension); /* make the call */ check_res = PQexec(conn, check_sql); /* check and deal with errors */ if (!check_res || PQresultStatus(check_res) > 2) { warnx("pgreport: query failed: %s", PQerrorMessage(conn)); PQclear(check_res); PQfinish(conn); errx(1, "pgreport: query was: %s", check_sql); } if (PQntuples(check_res) == 0) { /* check if extension is already installed */ snprintf(install_sql, sizeof(install_sql), "create extension %s", extension); /* make the call */ install_res = PQexec(conn, install_sql); /* install and deal with errors */ if (!install_res || PQresultStatus(install_res) > 2) { warnx("pgreport: query failed: %s", PQerrorMessage(conn)); PQclear(install_res); PQfinish(conn); errx(1, "pgreport: query was: %s", install_sql); } /* cleanup */ PQclear(install_res); } /* cleanup */ PQclear(check_res); } } /* * Fetch PostgreSQL major and minor numbers */ void fetch_version() { char sql[PGREPORT_DEFAULT_STRING_SIZE]; PGresult *res; if (opts->script) { printf("\\echo PostgreSQL version\n"); printf("SELECT version();\n"); } else { /* get the cluster version */ snprintf(sql, sizeof(sql), "SELECT version()"); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgreport: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgreport: query was: %s", sql); } /* get the only row, and parse it to get major and minor numbers */ sscanf(PQgetvalue(res, 0, 0), "%*s %d.%d", &(opts->major), &(opts->minor)); /* print version */ if (opts->verbose) printf("Detected 
release: %d.%d\n", opts->major, opts->minor); printf ("PostgreSQL version: %s\n", PQgetvalue(res, 0, 0)); /* cleanup */ PQclear(res); } } /* * Fetch PostgreSQL reload configuration time */ void fetch_postmaster_reloadconftime() { char sql[PGREPORT_DEFAULT_STRING_SIZE]; PGresult *res; if (opts->script) { printf("\\echo PostgreSQL reload conf time\n"); printf("SELECT pg_conf_load_time();\n"); } else { /* get the cluster version */ snprintf(sql, sizeof(sql), "SELECT pg_conf_load_time()"); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgreport: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgreport: query was: %s", sql); } printf ("PostgreSQL reload conf time: %s\n", PQgetvalue(res, 0, 0)); /* cleanup */ PQclear(res); } } /* * Fetch PostgreSQL start time */ void fetch_postmaster_starttime() { char sql[PGREPORT_DEFAULT_STRING_SIZE]; PGresult *res; if (opts->script) { printf("\\echo PostgreSQL start time\n"); printf("SELECT pg_postmaster_start_time();\n"); } else { /* get the cluster version */ snprintf(sql, sizeof(sql), "SELECT pg_postmaster_start_time()"); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgreport: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgreport: query was: %s", sql); } printf ("PostgreSQL start time: %s\n", PQgetvalue(res, 0, 0)); /* cleanup */ PQclear(res); } } /* * Handle query */ void fetch_table(char *label, char *query) { PGresult *res; printQueryOpt myopt; if (opts->script) { printf("\\echo %s\n",label); printf("%s;\n",query); } else { myopt.nullPrint = NULL; myopt.title = pstrdup(label); myopt.translate_header = false; myopt.n_translate_columns = 0; myopt.translate_columns = NULL; myopt.footers = NULL; myopt.topt.format = PRINT_ALIGNED; myopt.topt.expanded = 0; myopt.topt.border = 2; myopt.topt.pager = 0; 
myopt.topt.tuples_only = false; myopt.topt.start_table = true; myopt.topt.stop_table = true; myopt.topt.default_footer = false; myopt.topt.line_style = NULL; //myopt.topt.fieldSep = NULL; //myopt.topt.recordSep = NULL; myopt.topt.numericLocale = false; myopt.topt.tableAttr = NULL; myopt.topt.encoding = PQenv2encoding(); myopt.topt.env_columns = 0; //myopt.topt.columns = 3; myopt.topt.unicode_border_linestyle = UNICODE_LINESTYLE_SINGLE; myopt.topt.unicode_column_linestyle = UNICODE_LINESTYLE_SINGLE; myopt.topt.unicode_header_linestyle = UNICODE_LINESTYLE_SINGLE; /* execute it */ res = PQexec(conn, query); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgreport: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgreport: query was: %s", query); } /* print results */ printQuery(res, &myopt, stdout, false, NULL); /* cleanup */ PQclear(res); } } void fetch_kernelconfig(char *cfg) { char *filename; filename = pg_malloc(strlen("/proc/sys/vm/")+strlen(cfg)); sprintf(filename, "/proc/sys/vm/%s", cfg); printf("%s : ", cfg); fetch_file(filename); printf("\n"); pg_free(filename); } void fetch_file(char *filename) { FILE *fp; char ch; fp = fopen(filename, "r"); // read mode if (fp == NULL) { perror("Error while opening the file.\n"); exit(EXIT_FAILURE); } while((ch = fgetc(fp)) != EOF) { printf("%c", ch); } fclose(fp); } void exec_command(char *cmd) { int filedes[2]; pid_t pid; char *buffer; ssize_t count; if (pipe(filedes) == -1) { perror("pipe"); exit(1); } pid = fork(); if (pid == -1) { perror("fork"); exit(1); } else if (pid == 0) { while ((dup2(filedes[1], STDOUT_FILENO) == -1) && (errno == EINTR)) {} close(filedes[1]); close(filedes[0]); execl(cmd, cmd, (char*)0); perror("execl"); _exit(1); } close(filedes[1]); buffer = (char *) pg_malloc(1); while (1) { count = read(filedes[0], buffer, sizeof(buffer)); if (count == -1) { if (errno == EINTR) { continue; } else { perror("read"); exit(1); } } else if (count == 
0) { break; } else { printf("%s", buffer); } } close(filedes[0]); pg_free(buffer); } /* * Close the PostgreSQL connection, and quit */ static void quit_properly(SIGNAL_ARGS) { PQfinish(conn); exit(1); } /* * Main function */ int main(int argc, char **argv) { char sql[10240]; /* * If the user stops the program, * quit nicely. */ pqsignal(SIGINT, quit_properly); /* Allocate the options struct */ opts = (struct options *) pg_malloc(sizeof(struct options)); /* Parse the options */ get_opts(argc, argv); if (opts->script) { printf("\\echo =================================================================================\n"); printf("\\echo == pgreport SQL script for a %s release =========================================\n", opts->script); printf("\\echo =================================================================================\n"); printf("SET application_name to 'pgreport';\n"); } else { /* connect to the database */ conn = sql_conn(); } /* Create schema, and set if as our search_path */ execute(CREATE_SCHEMA); execute(SET_SEARCHPATH); /* Install some extensions if they are not already there */ install_extension("pg_buffercache"); /* Install some functions/views */ execute(CREATE_GETVALUE_FUNCTION_SQL); execute(CREATE_BLOATTABLE_VIEW_SQL); strcat(sql, CREATE_BLOATINDEX_VIEW_SQL_1); strcat(sql, CREATE_BLOATINDEX_VIEW_SQL_2); execute(sql); execute(CREATE_ORPHANEDFILES_VIEW_SQL); /* Fetch version */ printf("%s# PostgreSQL Version\n\n", opts->script ? "\\echo " : ""); fetch_version(); printf("\n"); /* Fetch postmaster start time */ printf("%s# PostgreSQL Start time\n\n", opts->script ? "\\echo " : ""); fetch_postmaster_starttime(); printf("\n"); /* Fetch reload conf time */ printf("%s# PostgreSQL Reload conf time\n\n", opts->script ? "\\echo " : ""); fetch_postmaster_reloadconftime(); printf("\n"); /* Fetch settings by various ways */ printf("%s# PostgreSQL Configuration\n\n", opts->script ? 
"\\echo " : ""); fetch_table(SETTINGS_BY_SOURCEFILE_TITLE, SETTINGS_BY_SOURCEFILE_SQL); fetch_table(SETTINGS_NOTCONFIGFILE_NOTDEFAULTVALUE_TITLE, SETTINGS_NOTCONFIGFILE_NOTDEFAULTVALUE_SQL); if (backend_minimum_version(9,5)) { fetch_table(PGFILESETTINGS_TITLE, PGFILESETTINGS_SQL); } if (backend_minimum_version(10,0)) { fetch_table(PGHBAFILERULES_TITLE, PGHBAFILERULES_SQL); } fetch_table(PGSETTINGS_TITLE, PGSETTINGS_SQL); /* Fetch global objects */ printf("%s# Global objects\n\n", opts->script ? "\\echo " : ""); fetch_table(CLUSTER_HITRATIO_TITLE, CLUSTER_HITRATIO_SQL); fetch_table(CLUSTER_BUFFERSUSAGE_TITLE, CLUSTER_BUFFERSUSAGE_SQL); fetch_table(CLUSTER_BUFFERSUSAGEDIRTY_TITLE, CLUSTER_BUFFERSUSAGEDIRTY_SQL); fetch_table(DATABASES_TITLE, DATABASES_SQL); fetch_table(DATABASES_IN_CACHE_TITLE, DATABASES_IN_CACHE_SQL); fetch_table(TABLESPACES_TITLE, TABLESPACES_SQL); fetch_table(ROLES_TITLE, backend_minimum_version(9,5) ? ROLES_SQL_95min : ROLES_SQL_94max); fetch_table(USER_PASSWORDS_TITLE, USER_PASSWORDS_SQL); fetch_table(DATABASEUSER_CONFIG_TITLE, DATABASEUSER_CONFIG_SQL); /* Fetch local objects of the current database */ printf("%s# Local objects in database %s\n\n", opts->script ? 
"\\echo " : "", opts->dbname); fetch_table(SCHEMAS_TITLE, SCHEMAS_SQL); fetch_table(NBRELS_IN_SCHEMA_TITLE, NBRELS_IN_SCHEMA_SQL); if (backend_minimum_version(11,0)) { fetch_table(NBFUNCSPROCS_IN_SCHEMA_TITLE, NBFUNCSPROCS_IN_SCHEMA_SQL); } else { fetch_table(NBFUNCS_IN_SCHEMA_TITLE, NBFUNCS_IN_SCHEMA_SQL); } fetch_table(HEAPTOAST_SIZE_TITLE, HEAPTOAST_SIZE_SQL); fetch_table(EXTENSIONS_TITLE, EXTENSIONS_SQL); fetch_table(KINDS_SIZE_TITLE, KINDS_SIZE_SQL); fetch_table(DEPENDENCIES_TITLE, DEPENDENCIES_SQL); fetch_table(KINDS_IN_CACHE_TITLE, KINDS_IN_CACHE_SQL); fetch_table(AM_SIZE_TITLE, AM_SIZE_SQL); fetch_table(INDEXTYPE_TITLE, INDEXTYPE_SQL); fetch_table(PERCENTUSEDINDEXES_TITLE, PERCENTUSEDINDEXES_SQL); fetch_table(UNUSEDINDEXES_TITLE, UNUSEDINDEXES_SQL); fetch_table(REDUNDANTINDEXES_TITLE, REDUNDANTINDEXES_SQL); fetch_table(ORPHANEDFILES_TITLE, ORPHANEDFILES_SQL); fetch_table(NBFUNCS_TITLE, NBFUNCS_SQL); if (backend_minimum_version(11,0)) { fetch_table(FUNCSPROCS_PER_SCHEMA_AND_KIND_TITLE, FUNCSPROCS_PER_SCHEMA_AND_KIND_SQL); } else { fetch_table(FUNCS_PER_SCHEMA_TITLE, FUNCS_PER_SCHEMA_SQL); } fetch_table(LOBJ_TITLE, LOBJ_SQL); fetch_table(LOBJ_STATS_TITLE, LOBJ_STATS_SQL); fetch_table(RELOPTIONS_TITLE, RELOPTIONS_SQL); fetch_table(NEEDVACUUM_TITLE, NEEDVACUUM_SQL); fetch_table(NEEDANALYZE_TITLE, NEEDANALYZE_SQL); fetch_table(MINAGE_TITLE, MINAGE_SQL); fetch_table(TOBEFROZEN_TABLES_TITLE, TOBEFROZEN_TABLES_SQL); fetch_table(BLOATOVERVIEW_TITLE, BLOATOVERVIEW_SQL); fetch_table(TOP20BLOAT_TABLES_TITLE, TOP20BLOAT_TABLES_SQL); fetch_table(TOP20BLOAT_INDEXES_TITLE, TOP20BLOAT_INDEXES_SQL); fetch_table(REPSLOTS_TITLE, REPSLOTS_SQL); if (backend_minimum_version(10,0)) { fetch_table(PUBLICATIONS_TITLE, PUBLICATIONS_SQL); fetch_table(SUBSCRIPTIONS_TITLE, SUBSCRIPTIONS_SQL); } /* fetch_table(TOP10QUERYIDS_TITLE, TOP10QUERYIDS_SQL); fetch_table(TOP10QUERIES_TITLE, TOP10QUERIES_SQL); */ /* * Uninstall all * Actually, it drops our schema, which should get rid of all our 
stuff */ execute(DROP_ALL); if (opts->script) { /* Drop the function */ PQfinish(conn); } pg_free(opts); return 0; } pgstats-REL1_2_0/pgreport_queries.h000066400000000000000000000525451406310430400174620ustar00rootroot00000000000000#define SETTINGS_BY_SOURCEFILE_TITLE "Settings by source file" #define SETTINGS_BY_SOURCEFILE_SQL "SELECT source, sourcefile, count(*) AS nb FROM pg_settings GROUP BY 1, 2" #define SETTINGS_NOTCONFIGFILE_NOTDEFAULTVALUE_TITLE "Non default value and not config file settings" #define SETTINGS_NOTCONFIGFILE_NOTDEFAULTVALUE_SQL "SELECT source, name, setting, unit FROM pg_settings WHERE source NOT IN ('configuration file', 'default') ORDER BY source, name" #define CLUSTER_HITRATIO_TITLE "Hit ratio" #define CLUSTER_HITRATIO_SQL "SELECT 'index hit rate' AS name, 100.*sum(idx_blks_hit) / nullif(sum(idx_blks_hit + idx_blks_read),0) AS ratio FROM pg_statio_user_indexes UNION ALL SELECT 'table hit rate' AS name, 100.*sum(heap_blks_hit) / nullif(sum(heap_blks_hit) + sum(heap_blks_read),0) AS ratio FROM pg_statio_user_tables" #define CLUSTER_BUFFERSUSAGE_TITLE "Buffers Usage" #define CLUSTER_BUFFERSUSAGE_SQL "SELECT usagecount, count(*) FROM pg_buffercache GROUP BY 1 ORDER BY 1" #define CLUSTER_BUFFERSUSAGEDIRTY_TITLE "Buffers Usage with dirty" #define CLUSTER_BUFFERSUSAGEDIRTY_SQL "SELECT usagecount, isdirty, count(*) FROM pg_buffercache GROUP BY 1,2 ORDER BY 1,2" #define DATABASES_TITLE "Databases" #define DATABASES_SQL "SELECT d.datname as \"Name\", pg_catalog.pg_get_userbyid(d.datdba) as \"Owner\", pg_catalog.pg_encoding_to_char(d.encoding) as \"Encoding\", d.datcollate as \"Collate\", d.datctype as \"Ctype\", pg_catalog.array_to_string(d.datacl, E'\n') AS \"Access privileges\", CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_size_pretty(pg_catalog.pg_database_size(d.datname)) ELSE 'No Access' END as \"Size\", t.spcname as \"Tablespace\", pg_catalog.shobj_description(d.oid, 'pg_database') as \"Description\" 
FROM pg_catalog.pg_database d JOIN pg_catalog.pg_tablespace t on d.dattablespace = t.oid ORDER BY 1" #define DATABASES_IN_CACHE_TITLE "Databases in cache" #define DATABASES_IN_CACHE_SQL "SELECT CASE WHEN datname IS NULL THEN '' ELSE datname END AS datname, pg_size_pretty(count(*)*8192) FROM pg_buffercache bc LEFT JOIN pg_database d ON d.oid=bc.reldatabase GROUP BY 1 ORDER BY count(*) DESC" #define TABLESPACES_TITLE "Tablespaces" #define TABLESPACES_SQL "SELECT spcname AS \"Name\", pg_catalog.pg_get_userbyid(spcowner) AS \"Owner\", pg_catalog.pg_tablespace_location(oid) AS \"Location\", pg_size_pretty(pg_tablespace_size(oid)) AS \"Size\", pg_catalog.array_to_string(spcacl, E'\n') AS \"Access privileges\", spcoptions AS \"Options\", pg_catalog.shobj_description(oid, 'pg_tablespace') AS \"Description\" FROM pg_catalog.pg_tablespace ORDER BY 1" #define ROLES_TITLE "Roles" #define ROLES_SQL_94max "SELECT r.rolname, r.rolsuper, r.rolinherit, r.rolcreaterole, r.rolcreatedb, r.rolcanlogin, r.rolconnlimit, r.rolvaliduntil, ARRAY(SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) as memberof, r.rolreplication FROM pg_catalog.pg_roles r WHERE r.rolname !~ '^pg_' ORDER BY 1" #define ROLES_SQL_95min "SELECT r.rolname, r.rolsuper, r.rolinherit, r.rolcreaterole, r.rolcreatedb, r.rolcanlogin, r.rolconnlimit, r.rolvaliduntil, ARRAY(SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) as memberof, r.rolreplication, r.rolbypassrls FROM pg_catalog.pg_roles r WHERE r.rolname !~ '^pg_' ORDER BY 1" #define USER_PASSWORDS_TITLE "User passwords" #define USER_PASSWORDS_SQL "SELECT usename, valuntil, CASE WHEN passwd IS NULL THEN '' else passwd END AS passwd FROM pg_catalog.pg_shadow ORDER BY 1" #define DATABASEUSER_CONFIG_TITLE "Databases and users specific configuration" #define DATABASEUSER_CONFIG_SQL "select datname, rolname, setconfig from 
pg_db_role_setting drs left join pg_database d on d.oid=drs.setdatabase left join pg_roles r on r.oid=drs.setrole" #define SCHEMAS_TITLE "Schemas" #define SCHEMAS_SQL "SELECT n.nspname AS \"Name\", pg_catalog.pg_get_userbyid(n.nspowner) AS \"Owner\" FROM pg_catalog.pg_namespace n WHERE n.nspname !~ '^pg_' AND n.nspname <> 'information_schema' ORDER BY 1" #define NBRELS_IN_SCHEMA_TITLE "Relations per kinds and schemas" #define NBRELS_IN_SCHEMA_SQL "select nspname, rolname, count(*) filter (where relkind='r') as tables, count(*) filter (where relkind='t') as toasts, count(*) filter (where relkind='i') as index, count(*) filter (where relkind='s') as sequences from pg_namespace n join pg_roles r on r.oid=n.nspowner left join pg_class c on n.oid=c.relnamespace group by nspname, rolname order by 1, 2" #define NBFUNCS_IN_SCHEMA_TITLE "Functions per schema" #define NBFUNCS_IN_SCHEMA_SQL "select nspname, rolname, count(*) filter (where p.oid is not null) as functions from pg_namespace n join pg_roles r on r.oid=n.nspowner left join pg_proc p on n.oid=p.pronamespace group by nspname, rolname order by 1, 2" #define NBFUNCSPROCS_IN_SCHEMA_TITLE "Routines per schema" #define NBFUNCSPROCS_IN_SCHEMA_SQL "select nspname, rolname, count(*) filter (where prokind='f') as functions, count(*) filter (where prokind='p') as procedures from pg_namespace n join pg_roles r on r.oid=n.nspowner left join pg_proc p on n.oid=p.pronamespace group by nspname, rolname order by 1, 2" #define HEAPTOAST_SIZE_TITLE "HEAP and TOAST sizes per schema" #define HEAPTOAST_SIZE_SQL "select nspname, relname, pg_relation_size(c.oid) as heap_size, pg_relation_size(reltoastrelid) as toast_size from pg_namespace n join pg_class c on n.oid=c.relnamespace where pg_relation_size(reltoastrelid)>0 order by nspname, relname" #define EXTENSIONS_TITLE "Extensions" #define EXTENSIONS_SQL "SELECT e.extname AS \"Name\", e.extversion AS \"Version\", n.nspname AS \"Schema\", c.description AS \"Description\" FROM 
pg_catalog.pg_extension e LEFT JOIN pg_catalog.pg_namespace n ON n.oid = e.extnamespace LEFT JOIN pg_catalog.pg_description c ON c.objoid = e.oid AND c.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass ORDER BY 1" #define KINDS_SIZE_TITLE "Number and size per relations kinds" #define KINDS_SIZE_SQL "SELECT nspname, relkind, count(*), pg_size_pretty(sum(pg_table_size(c.oid))) FROM pg_class c JOIN pg_namespace n ON n.oid=c.relnamespace GROUP BY 1,2 ORDER BY 1,2" #define DEPENDENCIES_TITLE "Dependencies" #define DEPENDENCIES_SQL "with etypes as ( select classid::regclass, objid, deptype, e.extname from pg_depend join pg_extension e on refclassid = 'pg_extension'::regclass and refobjid = e.oid where classid = 'pg_type'::regclass ) select etypes.extname, etypes.objid::regtype as type, n.nspname as schema, c.relname as table, attname as column from pg_depend join etypes on etypes.classid = pg_depend.refclassid and etypes.objid = pg_depend.refobjid join pg_class c on c.oid = pg_depend.objid join pg_namespace n on n.oid = c.relnamespace join pg_attribute attr on attr.attrelid = pg_depend.objid and attr.attnum = pg_depend.objsubid where pg_depend.classid = 'pg_class'::regclass" #define KINDS_IN_CACHE_TITLE "Relation kinds in cache" #define KINDS_IN_CACHE_SQL "select relkind, pg_size_pretty(count(*)*8192) from pg_buffercache bc left join pg_class c on c.relfilenode=bc.relfilenode group by 1 order by count(*) desc" #define AM_SIZE_TITLE "Access Methods" #define AM_SIZE_SQL "select nspname, amname, count(*), pg_size_pretty(sum(pg_table_size(c.oid))) from pg_class c join pg_am a on a.oid=c.relam join pg_namespace n on n.oid=c.relnamespace group by 1, 2 order by 1,2" #define INDEXTYPE_TITLE "Index by types" #define INDEXTYPE_SQL "SELECT nspname, count(*) FILTER (WHERE not indisunique AND not indisprimary) as standard, count(*) FILTER (WHERE indisunique AND not indisprimary) as unique, count(*) FILTER (WHERE indisprimary) as primary, count(*) FILTER (WHERE indisexclusion) 
as exclusion, count(*) FILTER (WHERE indisclustered) as clustered, count(*) FILTER (WHERE indisvalid) as valid FROM pg_index i JOIN pg_class c ON c.oid=i.indexrelid JOIN pg_namespace n ON n.oid=c.relnamespace GROUP BY 1;" #define NBFUNCS_TITLE "User routines" #define NBFUNCS_SQL "select count(*) from pg_proc where pronamespace=2200 or pronamespace>16383" #define FUNCSPROCS_PER_SCHEMA_AND_KIND_TITLE "Routines per schema and kind" #define FUNCSPROCS_PER_SCHEMA_AND_KIND_SQL "select n.nspname, l.lanname, p.prokind, count(*) from pg_proc p join pg_namespace n on n.oid=p.pronamespace join pg_language l on l.oid=p.prolang where pronamespace=2200 or pronamespace>16383 group by 1, 2, 3 order by 1, 2, 3" #define FUNCS_PER_SCHEMA_TITLE "Functions per schema and language" #define FUNCS_PER_SCHEMA_SQL "select n.nspname, l.lanname, count(*) from pg_proc p join pg_namespace n on n.oid=p.pronamespace join pg_language l on l.oid=p.prolang where pronamespace=2200 or pronamespace>16383 group by 1, 2 order by 1, 2" #define LOBJ_TITLE "Large Objects" #define LOBJ_SQL "select count(*) from pg_largeobject" #define LOBJ_STATS_TITLE "Large Objects Size" #define LOBJ_STATS_SQL "select reltuples, relpages from pg_class where relname='pg_largeobject'" #define RELOPTIONS_TITLE "Relation Options" #define RELOPTIONS_SQL "select nspname, relkind, relname, reloptions from pg_class c join pg_namespace n on n.oid=c.relnamespace where reloptions is not null order by 1, 3, 2" #define TOBEFROZEN_TABLES_TITLE "Tables to be frozen" #define TOBEFROZEN_TABLES_SQL "select count(*) from pg_class where relkind='r' and age(relfrozenxid)>current_setting('autovacuum_freeze_max_age')::integer" #define PGFILESETTINGS_TITLE "pg_file_settings" #define PGFILESETTINGS_SQL "select * from pg_file_settings " #define PGHBAFILERULES_TITLE "pg_hba_file_rules" #define PGHBAFILERULES_SQL "select * from pg_hba_file_rules" #define PUBLICATIONS_TITLE "Publications" #define PUBLICATIONS_SQL "select * from pg_publication" #define 
REPSLOTS_TITLE "Replication slots" #define REPSLOTS_SQL "select * from pg_replication_slots" #define SUBSCRIPTIONS_TITLE "Subscriptions" #define SUBSCRIPTIONS_SQL "select * from pg_subscription" #define PGSETTINGS_TITLE "pg_settings" #define PGSETTINGS_SQL "select * from pg_settings" #define TOP10QUERYIDS_SQL "select queryid, calls, total_time, mean_time from pg_stat_statements order by total_time desc limit 10" #define TOP10QUERIES_SQL "select queryid, query from pg_stat_statements order by total_time desc limit 10" #define PERCENTUSEDINDEXES_TITLE "Percentage usage of indexes" #define PERCENTUSEDINDEXES_SQL "SELECT relname, CASE idx_scan WHEN 0 THEN 'Insufficient data' ELSE (100 * idx_scan / (seq_scan + idx_scan))::text END percent_of_times_index_used, n_live_tup rows_in_table FROM pg_stat_user_tables ORDER BY n_live_tup DESC" #define UNUSEDINDEXES_TITLE "Unused indexes" #define UNUSEDINDEXES_SQL "select schemaname, count(*) from pg_stat_user_indexes s join pg_index i using (indexrelid) where idx_scan=0 and (not indisunique AND not indisprimary) group by 1;" #define REDUNDANTINDEXES_TITLE "Redundant indexes" #define REDUNDANTINDEXES_SQL "SELECT pg_size_pretty(SUM(pg_relation_size(idx))::BIGINT) AS SIZE, string_agg(idx::text, ', ') AS indexes FROM ( SELECT indexrelid::regclass AS idx, (indrelid::text ||E'\n'|| indclass::text ||E'\n'|| indkey::text ||E'\n'||COALESCE(indexprs::text,'')||E'\n' || COALESCE(indpred::text,'')) AS KEY FROM pg_index) sub GROUP BY KEY HAVING COUNT(*)>1 ORDER BY SUM(pg_relation_size(idx)) DESC" #define MINAGE_TITLE "Min age" #define MINAGE_SQL "SELECT label, age FROM ( select 'Process #'||pid AS label, age(backend_xid) AS age from pg_stat_activity UNION select 'Process #'||pid, age(backend_xmin) from pg_stat_activity UNION select 'Prepared transaction '||gid, age(transaction) from pg_prepared_xacts UNION select 'Replication slot '||slot_name, age(xmin) from pg_replication_slots UNION select 'Replication slot '||slot_name, age(catalog_xmin) 
from pg_replication_slots) tmp WHERE age IS NOT NULL ORDER BY age DESC;" #define NEEDVACUUM_TITLE "Tables needing autoVACUUMs" #define NEEDVACUUM_SQL "SELECT st.schemaname || '.' || st.relname tablename, st.n_dead_tup dead_tup, get_value('autovacuum_vacuum_threshold', c.reloptions, c.relkind) + get_value('autovacuum_vacuum_scale_factor', c.reloptions, c.relkind) * c.reltuples max_dead_tup, st.last_autovacuum FROM pg_stat_all_tables st, pg_class c WHERE c.oid = st.relid AND c.relkind IN ('r','m','t') AND st.n_dead_tup>0" #define NEEDANALYZE_TITLE "Tables needing autoANALYZEs" #define NEEDANALYZE_SQL "SELECT st.schemaname || '.' || st.relname tablename, st.n_mod_since_analyze mod_tup, get_value('autovacuum_analyze_threshold', c.reloptions, c.relkind) + get_value('autovacuum_analyze_scale_factor', c.reloptions, c.relkind) * c.reltuples max_mod_tup, st.last_autoanalyze FROM pg_stat_all_tables st, pg_class c WHERE c.oid = st.relid AND c.relkind IN ('r','m') AND st.n_mod_since_analyze>0" #define CREATE_GETVALUE_FUNCTION_SQL "CREATE FUNCTION get_value(param text, reloptions text[], relkind \"char\") RETURNS float AS $$ SELECT coalesce((SELECT option_value FROM pg_options_to_table(reloptions) WHERE option_name = CASE WHEN relkind = 't' THEN 'toast.' 
ELSE '' END || param), current_setting(param))::float; $$ LANGUAGE sql" #define CREATE_BLOATTABLE_VIEW_SQL "CREATE VIEW bloat_table AS SELECT schemaname, tblname, bs*tblpages AS real_size, (tblpages-est_tblpages)*bs AS extra_size, CASE WHEN tblpages - est_tblpages > 0 THEN 100 * (tblpages - est_tblpages)/tblpages::float ELSE 0 END AS extra_ratio, fillfactor, CASE WHEN tblpages - est_tblpages_ff > 0 THEN (tblpages-est_tblpages_ff)*bs ELSE 0 END AS bloat_size, CASE WHEN tblpages - est_tblpages_ff > 0 THEN 100 * (tblpages - est_tblpages_ff)/tblpages::float ELSE 0 END AS bloat_ratio, is_na FROM ( SELECT ceil( reltuples / ( (bs-page_hdr)/tpl_size ) ) + ceil( toasttuples / 4 ) AS est_tblpages, ceil( reltuples / ( (bs-page_hdr)*fillfactor/(tpl_size*100) ) ) + ceil( toasttuples / 4 ) AS est_tblpages_ff, tblpages, fillfactor, bs, tblid, schemaname, tblname, heappages, toastpages, is_na FROM ( SELECT ( 4 + tpl_hdr_size + tpl_data_size + (2*ma) - CASE WHEN tpl_hdr_size%ma = 0 THEN ma ELSE tpl_hdr_size%ma END - CASE WHEN ceil(tpl_data_size)::int%ma = 0 THEN ma ELSE ceil(tpl_data_size)::int%ma END) AS tpl_size, bs - page_hdr AS size_per_block, (heappages + toastpages) AS tblpages, heappages, toastpages, reltuples, toasttuples, bs, page_hdr, tblid, schemaname, tblname, fillfactor, is_na FROM ( SELECT tbl.oid AS tblid, ns.nspname AS schemaname, tbl.relname AS tblname, tbl.reltuples, tbl.relpages AS heappages, coalesce(toast.relpages, 0) AS toastpages, coalesce(toast.reltuples, 0) AS toasttuples, coalesce(substring( array_to_string(tbl.reloptions, ' ') FROM 'fillfactor=([0-9]+)')::smallint, 100) AS fillfactor, current_setting('block_size')::numeric AS bs, CASE WHEN version()~'mingw32' OR version()~'64-bit|x86_64|ppc64|ia64|amd64' THEN 8 ELSE 4 END AS ma, 24 AS page_hdr, 23 + CASE WHEN MAX(coalesce(s.null_frac,0)) > 0 THEN ( 7 + count(s.attname) ) / 8 ELSE 0::int END + CASE WHEN bool_or(att.attname = 'oid' and att.attnum < 0) THEN 4 ELSE 0 END AS tpl_hdr_size, sum( 
(1-coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 0) ) AS tpl_data_size, bool_or(att.atttypid = 'pg_catalog.name'::regtype) OR sum(CASE WHEN att.attnum > 0 THEN 1 ELSE 0 END) <> count(s.attname) AS is_na FROM pg_attribute AS att JOIN pg_class AS tbl ON att.attrelid = tbl.oid JOIN pg_namespace AS ns ON ns.oid = tbl.relnamespace LEFT JOIN pg_stats AS s ON s.schemaname=ns.nspname AND s.tablename = tbl.relname AND s.inherited=false AND s.attname=att.attname LEFT JOIN pg_class AS toast ON tbl.reltoastrelid = toast.oid WHERE NOT att.attisdropped AND tbl.relkind in ('r','m') GROUP BY 1,2,3,4,5,6,7,8,9,10 ORDER BY 2,3) AS s) AS s2) AS s3" #define CREATE_BLOATINDEX_VIEW_SQL_1 "CREATE VIEW bloat_index AS SELECT nspname AS schemaname, tblname, idxname, bs*(relpages)::bigint AS real_size, bs*(relpages-est_pages)::bigint AS extra_size, 100 * (relpages-est_pages)::float / relpages AS extra_ratio, fillfactor, CASE WHEN relpages > est_pages_ff THEN bs*(relpages-est_pages_ff) ELSE 0 END AS bloat_size, 100 * (relpages-est_pages_ff)::float / relpages AS bloat_ratio, is_na FROM ( SELECT coalesce(1 + ceil(reltuples/floor((bs-pageopqdata-pagehdr)/(4+nulldatahdrwidth)::float)), 0) AS est_pages, coalesce(1 + ceil(reltuples/floor((bs-pageopqdata-pagehdr)*fillfactor/(100*(4+nulldatahdrwidth)::float))), 0) AS est_pages_ff, bs, nspname, tblname, idxname, relpages, fillfactor, is_na FROM ( SELECT maxalign, bs, nspname, tblname, idxname, reltuples, relpages, idxoid, fillfactor, ( index_tuple_hdr_bm + maxalign - CASE WHEN index_tuple_hdr_bm%maxalign = 0 THEN maxalign ELSE index_tuple_hdr_bm%maxalign END + nulldatawidth + maxalign - CASE WHEN nulldatawidth = 0 THEN 0 WHEN nulldatawidth::integer%maxalign = 0 THEN maxalign ELSE nulldatawidth::integer%maxalign END)::numeric AS nulldatahdrwidth, pagehdr, pageopqdata, is_na FROM ( SELECT n.nspname, i.tblname, i.idxname, i.reltuples, i.relpages, i.idxoid, i.fillfactor, current_setting('block_size')::numeric AS bs, CASE WHEN version() ~ 'mingw32' OR 
version() ~ '64-bit|x86_64|ppc64|ia64|amd64' THEN 8 ELSE 4 END AS maxalign, 24 AS pagehdr, 16 AS pageopqdata, CASE WHEN max(coalesce(s.null_frac,0)) = 0 THEN 2 ELSE 2 + (( 32 + 8 - 1 ) / 8) END AS index_tuple_hdr_bm, sum( (1-coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 1024)) AS nulldatawidth, max( CASE WHEN i.atttypid = 'pg_catalog.name'::regtype THEN 1 ELSE 0 END ) > 0 AS is_na FROM ( SELECT ct.relname AS tblname, ct.relnamespace, ic.idxname, ic.attpos, ic.indkey, ic.indkey[ic.attpos], ic.reltuples, ic.relpages, ic.tbloid, ic.idxoid, ic.fillfactor, coalesce(a1.attnum, a2.attnum) AS attnum, coalesce(a1.attname, a2.attname) AS attname, coalesce(a1.atttypid, a2.atttypid) AS atttypid, CASE WHEN a1.attnum IS NULL THEN ic.idxname ELSE ct.relname END AS attrelname FROM ( SELECT idxname, reltuples, relpages, tbloid, idxoid, fillfactor, indkey, pg_catalog.generate_series(1,indnatts) AS attpos " #define CREATE_BLOATINDEX_VIEW_SQL_2 "FROM ( SELECT ci.relname AS idxname, ci.reltuples, ci.relpages, i.indrelid AS tbloid, i.indexrelid AS idxoid, coalesce(substring( array_to_string(ci.reloptions, ' ') from 'fillfactor=([0-9]+)')::smallint, 90) AS fillfactor, i.indnatts, pg_catalog.string_to_array(pg_catalog.textin( pg_catalog.int2vectorout(i.indkey)),' ')::int[] AS indkey FROM pg_catalog.pg_index i JOIN pg_catalog.pg_class ci ON ci.oid = i.indexrelid WHERE ci.relam=(SELECT oid FROM pg_am WHERE amname = 'btree') AND ci.relpages > 0) AS idx_data) AS ic JOIN pg_catalog.pg_class ct ON ct.oid = ic.tbloid LEFT JOIN pg_catalog.pg_attribute a1 ON ic.indkey[ic.attpos] <> 0 AND a1.attrelid = ic.tbloid AND a1.attnum = ic.indkey[ic.attpos] LEFT JOIN pg_catalog.pg_attribute a2 ON ic.indkey[ic.attpos] = 0 AND a2.attrelid = ic.idxoid AND a2.attnum = ic.attpos) i JOIN pg_catalog.pg_namespace n ON n.oid = i.relnamespace JOIN pg_catalog.pg_stats s ON s.schemaname = n.nspname AND s.tablename = i.attrelname AND s.attname = i.attname GROUP BY 1,2,3,4,5,6,7,8,9,10,11) AS rows_data_stats) AS 
rows_hdr_pdg_stats) AS relation_stats" #define CREATE_ORPHANEDFILES_VIEW_SQL "CREATE VIEW orphaned_files AS WITH ver AS ( select current_setting('server_version_num') pgversion, v::integer/10000||'.'||mod(v::integer,10000)/100 AS version FROM current_setting('server_version_num') v), tbl_paths AS ( SELECT tbs.oid AS tbs_oid, spcname, 'pg_tblspc/' || tbs.oid || '/' || (SELECT dir FROM pg_ls_dir('pg_tblspc/'||tbs.oid||'/',true,false) dir WHERE dir LIKE E'PG\\_'||ver.version||E'\\_%' ) as tbl_path FROM pg_tablespace tbs, ver WHERE tbs.spcname NOT IN ('pg_default','pg_global')), files AS ( SELECT d.oid AS database_oid, 0 AS tbs_oid, 'base/'||d.oid AS path, file_name AS file_name, substring(file_name from E'[0-9]+' ) AS base_name FROM pg_database d, pg_ls_dir('base/' || d.oid,true,false) AS file_name WHERE d.datname = current_database() UNION ALL SELECT d.oid, tbp.tbs_oid, tbl_path||'/'||d.oid, file_name, (substring(file_name from E'[0-9]+' )) AS base_name FROM pg_database d, tbl_paths tbp, pg_ls_dir(tbp.tbl_path||'/'|| d.oid,true,false) AS file_name WHERE d.datname = current_database()), orphans AS ( SELECT tbs_oid, base_name, file_name, current_setting('data_directory')||'/'||path||'/'||file_name as orphaned_file, pg_filenode_relation (tbs_oid,base_name::oid) as rel_without_pgclass FROM ver, files LEFT JOIN pg_class c ON (c.relfilenode::text=files.base_name OR (c.oid::text = files.base_name and c.relfilenode=0 and c.relname like 'pg_%')) WHERE c.oid IS null AND lower(file_name) NOT LIKE 'pg_%') SELECT orphaned_file, pg_size_pretty((pg_stat_file(orphaned_file)).size) as file_size, (pg_stat_file(orphaned_file)).modification as modification_date, current_database() FROM orphans WHERE rel_without_pgclass IS NULL" #define BLOATOVERVIEW_TITLE "Bloat Overview" #define BLOATOVERVIEW_SQL "SELECT 'Tables'' bloat' AS label, pg_size_pretty(sum(bloat_size)::numeric) AS bloat_size FROM bloat_table UNION SELECT 'Indexes'' bloat', pg_size_pretty(sum(bloat_size)::numeric) FROM 
bloat_index" #define TOP20BLOAT_TABLES_TITLE "Top 20 most fragmented tables (over 1MB)" #define TOP20BLOAT_TABLES_SQL "SELECT * FROM bloat_table WHERE bloat_size>1e6 ORDER BY bloat_size DESC LIMIT 20" #define TOP20BLOAT_INDEXES_TITLE "Top 20 most fragmented indexes (over 1MB)" #define TOP20BLOAT_INDEXES_SQL "SELECT * FROM bloat_index WHERE bloat_size>1e6 ORDER BY bloat_size DESC LIMIT 20" #define ORPHANEDFILES_TITLE "Orphaned files" #define ORPHANEDFILES_SQL "SELECT * FROM orphaned_files ORDER BY file_size DESC" #define CREATE_SCHEMA "CREATE SCHEMA pgreport" #define SET_SEARCHPATH "SET search_path TO pgreport" #define DROP_ALL "DROP FUNCTION get_value(text, text[], \"char\");DROP VIEW bloat_table;DROP VIEW bloat_index;DROP VIEW orphaned_files;DROP EXTENSION pg_buffercache;DROP SCHEMA pgreport" pgstats-REL1_2_0/pgstat.c000066400000000000000000003376051406310430400153630ustar00rootroot00000000000000/* * pgstat, a PostgreSQL app to gather statistical informations * from a PostgreSQL database, and act like a vmstat tool. * * This software is released under the PostgreSQL Licence. * * Guillaume Lelarge, guillaume@lelarge.info, 2014-2021. 
* * pgstats/pgstat.c */ /* * Headers */ #include "postgres_fe.h" #include "common/string.h" #include #include #include #include #include #ifdef HAVE_GETOPT_H #include #endif #include "libpq-fe.h" #include "libpq/pqsignal.h" /* * Defines */ #define PGSTAT_VERSION "1.2.0" #define PGSTAT_DEFAULT_LINES 20 #define PGSTAT_DEFAULT_STRING_SIZE 1024 #define PGSTAT_OLDEST_STAT_RESET "0001-01-01" /* * Structs and enums */ typedef enum { NONE = 0, ARCHIVER, BGWRITER, BUFFERCACHE, CONNECTION, DATABASE, TABLE, TABLEIO, INDEX, FUNCTION, STATEMENT, SLRU, XLOG, TEMPFILE, REPSLOTS, WAITEVENT, WAL, PROGRESS_ANALYZE, PROGRESS_BASEBACKUP, PROGRESS_CLUSTER, PROGRESS_COPY, PROGRESS_CREATEINDEX, PROGRESS_VACUUM, PBPOOLS, PBSTATS } stat_t; /* these are the options structure for command line parameters */ struct options { /* misc */ bool verbose; bool dontredisplayheader; stat_t stat; char *filter; bool human_readable; /* connection parameters */ char *dbname; char *hostname; char *port; char *username; /* version number */ int major; int minor; /* extension namespace (pg_stat_statements or pg_buffercache) */ char *namespace; /* frequency */ int interval; int count; }; /* pg_stat_archiver struct */ struct pgstatarchiver { long archived_count; /* we don't put these columns here because it makes no sense to get a diff between the new and the old values ? last_archived_wal; ? last_archived_time; */ long failed_count; /* we don't put these columns here because it makes no sense to get a diff between the new and the old values ? last_failed_wal; ? 
last_failed_time; */ char *stats_reset; }; /* pg_stat_bgwriter struct */ struct pgstatbgwriter { long checkpoints_timed; long checkpoints_req; long checkpoint_write_time; long checkpoint_sync_time; long buffers_checkpoint; long buffers_clean; long maxwritten_clean; long buffers_backend; long buffers_backend_fsync; long buffers_alloc; char *stats_reset; }; /* pg_stat_database struct */ struct pgstatdatabase { /* we don't put numbackends here because it makes no sense to get a diff between the new and the old values long numbackends; */ long xact_commit; long xact_rollback; long blks_read; long blks_hit; long tup_returned; long tup_fetched; long tup_inserted; long tup_updated; long tup_deleted; long conflicts; long temp_files; long temp_bytes; long deadlocks; long checksum_failures; /* checksum_last_failure */ float blk_read_time; float blk_write_time; float session_time; float active_time; float idle_in_transaction_time; long sessions; long sessions_abandoned; long sessions_fatal; long sessions_killed; char *stats_reset; }; /* pg_stat_all_tables struct */ struct pgstattable { long seq_scan; long seq_tup_read; long idx_scan; long idx_tup_fetch; long n_tup_ins; long n_tup_upd; long n_tup_del; long n_tup_hot_upd; long n_live_tup; long n_dead_tup; long n_mod_since_analyze; long n_ins_since_vacuum; /* we don't put the timestamps here because it makes no sense to get a diff between the new and the old values ? last_vacuum; ? last_autovacuum; ? last_analyze; ? 
last_autoanalyze; */ long vacuum_count; long autovacuum_count; long analyze_count; long autoanalyze_count; }; /* pg_statio_all_tables struct */ struct pgstattableio { long heap_blks_read; long heap_blks_hit; long idx_blks_read; long idx_blks_hit; long toast_blks_read; long toast_blks_hit; long tidx_blks_read; long tidx_blks_hit; }; /* pg_stat_all_indexes struct */ struct pgstatindex { long idx_scan; long idx_tup_read; long idx_tup_fetch; }; /* pg_stat_user_functions struct */ struct pgstatfunction { long calls; float total_time; float self_time; }; /* pg_stat_statements struct */ struct pgstatstatement { /* long userid; long dbid; long queryid; text query; */ long plans; float total_plan_time; /* float min_plan_time; float max_plan_time; float mean_plan_time; float stddev_plan_time; */ long calls; float total_exec_time; /* float min_exec_time; float max_exec_time; float mean_exec_time; float stddev_exec_time; */ long rows; long shared_blks_hit; long shared_blks_read; long shared_blks_dirtied; long shared_blks_written; long local_blks_hit; long local_blks_read; long local_blks_dirtied; long local_blks_written; long temp_blks_read; long temp_blks_written; float blk_read_time; float blk_write_time; long wal_records; long wal_fpi; long wal_bytes; }; /* pg_stat_slru struct */ struct pgstatslru { long blks_zeroed; long blks_hit; long blks_read; long blks_written; long blks_exists; long flushes; long truncates; char *stats_reset; }; /* pg_stat_wal struct */ struct pgstatwal { long wal_records; long wal_fpi; long wal_bytes; long wal_buffers_full; long wal_write; long wal_sync; float wal_write_time; float wal_sync_time; char *stats_reset; }; /* repslots struct */ struct repslots { char *currentlocation; char *restartlsn; long restartlsndiff; }; /* xlogstats struct */ struct xlogstats { char *location; long locationdiff; }; /* pgBouncer stats struct */ struct pgbouncerstats { long total_request; long total_received; long total_sent; long total_query_time; /* not used yet 
float avg_req; float avg_recv; float avg_sent; float avg_query; */ }; /* * Global variables */ PGconn *conn; struct options *opts; extern char *optarg; struct pgstatarchiver *previous_pgstatarchiver; struct pgstatbgwriter *previous_pgstatbgwriter; struct pgstatdatabase *previous_pgstatdatabase; struct pgstattable *previous_pgstattable; struct pgstattableio *previous_pgstattableio; struct pgstatindex *previous_pgstatindex; struct pgstatfunction *previous_pgstatfunction; struct pgstatstatement *previous_pgstatstatement; struct pgstatslru *previous_pgstatslru; struct pgstatwal *previous_pgstatwal; struct xlogstats *previous_xlogstats; struct repslots *previous_repslots; struct pgbouncerstats *previous_pgbouncerstats; int hdrcnt = 0; volatile sig_atomic_t wresized; static int winlines = PGSTAT_DEFAULT_LINES; /* * Function prototypes */ static void help(const char *progname); void get_opts(int, char **); #ifndef FE_MEMUTILS_H void *pg_malloc(size_t size); char *pg_strdup(const char *in); #endif PGconn *sql_conn(void); void print_pgstatarchiver(void); void print_pgstatbgwriter(void); void print_pgstatconnection(void); void print_pgstatdatabase(void); void print_pgstattable(void); void print_pgstattableio(void); void print_pgstatindex(void); void print_pgstatfunction(void); void print_pgstatstatement(void); void print_pgstatslru(void); void print_pgstatwal(void); void print_pgstatprogressanalyze(void); void print_pgstatprogressbasebackup(void); void print_pgstatprogresscluster(void); void print_pgstatprogresscopy(void); void print_pgstatprogresscreateindex(void); void print_pgstatprogressvacuum(void); void print_buffercache(void); void print_xlogstats(void); void print_repslotsstats(void); void print_tempfilestats(void); void print_pgstatwaitevent(void); void print_pgbouncerpools(void); void print_pgbouncerstats(void); void fetch_version(void); char *fetch_setting(char *name); void fetch_pgbuffercache_namespace(void); void fetch_pgstatstatements_namespace(void); bool 
backend_minimum_version(int major, int minor); void print_header(void); void print_line(void); void allocate_struct(void); static void needhdr(int dummy); static void needresize(int); void doresize(void); static void quit_properly(SIGNAL_ARGS); /* * Print help message */ static void help(const char *progname) { printf("%s gathers statistics from a PostgreSQL database.\n\n" "Usage:\n" " %s [OPTIONS] [delay [count]]\n" "\nGeneral options:\n" " -f FILTER include only this object\n" " (only works for database, table, tableio,\n" " index, function, statement statistics,\n" " replication slots, and slru)\n" " -H display human-readable values\n" " -n do not redisplay header\n" " -s STAT stats to collect\n" " -v verbose\n" " -?|--help show this help, then exit\n" " -V|--version output version information, then exit\n" "\nConnection options:\n" " -h HOSTNAME database server host or socket directory\n" " -p PORT database server port number\n" " -U USER connect as specified database user\n" " -d DBNAME database to connect to\n" "\nThe default stat is pg_stat_bgwriter, but you can change it with\n" "the -s command line option, and one of its value (STAT):\n" " * archiver for pg_stat_archiver (only for 9.4+)\n" " * bgwriter for pg_stat_bgwriter\n" " * buffercache for pg_buffercache (needs the extension)\n" " * connection (only for 9.2+)\n" " * database for pg_stat_database\n" " * table for pg_stat_all_tables\n" " * tableio for pg_statio_all_tables\n" " * index for pg_stat_all_indexes\n" " * function for pg_stat_user_function\n" " * statement for pg_stat_statements (needs the extension)\n" " * slru for pg_stat_slru (only for 13+)\n" " * xlog for xlog writes (only for 9.2+)\n" " * repslots for replication slots\n" " * tempfile for temporary file usage\n" " * waitevent for wait events usage\n" " * wal for pg_stat_wal (only for 14+)\n" " * progress_analyze for analyze progress monitoring (only for\n" " 13+)\n" " * progress_basebackup for base backup progress monitoring (only\n" " 
for 13+)\n" " * progress_cluster for cluster progress monitoring (only for\n" " 12+)\n" " * progress_copy for copy progress monitoring (only for\n" " 14+)\n" " * progress_createindex for create index progress monitoring (only\n" " for 12+)\n" " * progress_vacuum for vacuum progress monitoring (only for\n" " 9.6+)\n" " * pbpools for pgBouncer pools statistics\n" " * pbstats for pgBouncer statistics\n\n" "Report bugs to .\n", progname, progname); } /* * Parse command line options and check for some usage errors */ void get_opts(int argc, char **argv) { int c; const char *progname; progname = get_progname(argv[0]); /* set the defaults */ opts->verbose = false; opts->dontredisplayheader = false; opts->stat = NONE; opts->filter = NULL; opts->human_readable = false; opts->dbname = NULL; opts->hostname = NULL; opts->port = NULL; opts->username = NULL; opts->namespace = NULL; opts->interval = 1; opts->count = -1; if (argc > 1) { if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) { help(progname); exit(0); } if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) { puts("pgstats " PGSTAT_VERSION " (compiled with PostgreSQL " PG_VERSION ")"); exit(0); } } /* get opts */ while ((c = getopt(argc, argv, "h:Hp:U:d:f:ns:v")) != -1) { switch (c) { /* specify the database */ case 'd': opts->dbname = pg_strdup(optarg); break; /* specify the filter */ case 'f': opts->filter = pg_strdup(optarg); break; /* do not redisplay the header */ case 'n': opts->dontredisplayheader = true; break; /* don't show headers */ case 'v': opts->verbose = true; break; /* specify the stat */ case 's': if (opts->stat != NONE) { errx(1, "You can only use once the -s command line switch.\n"); } if (!strcmp(optarg, "archiver")) { opts->stat = ARCHIVER; } else if (!strcmp(optarg, "bgwriter")) { opts->stat = BGWRITER; } else if (!strcmp(optarg, "buffercache")) { opts->stat = BUFFERCACHE; } else if (!strcmp(optarg, "connection")) { opts->stat = CONNECTION; } else if (!strcmp(optarg, 
"database")) { opts->stat = DATABASE; } else if (!strcmp(optarg, "table")) { opts->stat = TABLE; } else if (!strcmp(optarg, "tableio")) { opts->stat = TABLEIO; } else if (!strcmp(optarg, "index")) { opts->stat = INDEX; } else if (!strcmp(optarg, "function")) { opts->stat = FUNCTION; } else if (!strcmp(optarg, "statement")) { opts->stat = STATEMENT; } else if (!strcmp(optarg, "slru")) { opts->stat = SLRU; } else if (!strcmp(optarg, "wal")) { opts->stat = WAL; } else if (!strcmp(optarg, "xlog")) { opts->stat = XLOG; } else if (!strcmp(optarg, "repslots")) { opts->stat = REPSLOTS; } else if (!strcmp(optarg, "tempfile")) { opts->stat = TEMPFILE; } else if (!strcmp(optarg, "waitevent")) { opts->stat = WAITEVENT; } else if (!strcmp(optarg, "progress_analyze")) { opts->stat = PROGRESS_ANALYZE; } else if (!strcmp(optarg, "progress_basebackup")) { opts->stat = PROGRESS_BASEBACKUP; } else if (!strcmp(optarg, "progress_cluster")) { opts->stat = PROGRESS_CLUSTER; } else if (!strcmp(optarg, "progress_copy")) { opts->stat = PROGRESS_COPY; } else if (!strcmp(optarg, "progress_createindex")) { opts->stat = PROGRESS_CREATEINDEX; } else if (!strcmp(optarg, "progress_vacuum")) { opts->stat = PROGRESS_VACUUM; } else if (!strcmp(optarg, "pbpools")) { opts->stat = PBPOOLS; } else if (!strcmp(optarg, "pbstats")) { opts->stat = PBSTATS; } else { errx(1, "Unknown service \"%s\".\nTry \"%s --help\" for more information.\n", optarg, progname); } break; /* host to connect to */ case 'h': opts->hostname = pg_strdup(optarg); break; /* display human-readable values */ case 'H': opts->human_readable = true; break; /* port to connect to on remote host */ case 'p': opts->port = pg_strdup(optarg); break; /* username */ case 'U': opts->username = pg_strdup(optarg); break; default: errx(1, "Try \"%s --help\" for more information.\n", progname); } } if (optind < argc) { opts->interval = atoi(argv[optind]); if (opts->interval == 0) { errx(1, "Invalid delay.\nTry \"%s --help\" for more information.\n", 
progname); } optind++; } if (optind < argc) { opts->count = atoi(argv[optind]); if (opts -> count == 0) { errx(1, "Invalid count.\nTry \"%s --help\" for more information.\n", progname); } } if (opts->stat == PBPOOLS || opts->stat == PBSTATS) { /* * Set (or override) database name. * It should always be pgbouncer */ opts->dbname = pg_strdup("pgbouncer"); } if (opts->dbname == NULL) { /* * We want to use dbname for possible error reports later, * and in case someone has set and is using PGDATABASE * in its environment preserve that name for later usage */ if (!getenv("PGDATABASE")) opts->dbname = "postgres"; else opts->dbname = getenv("PGDATABASE"); } } #ifndef FE_MEMUTILS_H /* * "Safe" wrapper around malloc(). */ void * pg_malloc(size_t size) { void *tmp; /* Avoid unportable behavior of malloc(0) */ if (size == 0) size = 1; tmp = malloc(size); if (!tmp) { fprintf(stderr, "out of memory\n"); exit(EXIT_FAILURE); } return tmp; } /* * "Safe" wrapper around strdup(). */ char * pg_strdup(const char *in) { char *tmp; if (!in) { fprintf(stderr, "cannot duplicate null pointer (internal error)\n"); exit(EXIT_FAILURE); } tmp = strdup(in); if (!tmp) { fprintf(stderr, "out of memory\n"); exit(EXIT_FAILURE); } return tmp; } #endif /* * Establish the PostgreSQL connection */ PGconn * sql_conn() { PGconn *my_conn; char *password = NULL; bool new_pass; #if PG_VERSION_NUM >= 90300 const char **keywords; const char **values; #else int size; char *dns; #endif char *message; /* * Start the connection. Loop until we have a password if requested by * backend. */ do { #if PG_VERSION_NUM >= 90300 /* * We don't need to check if the database name is actually a complete * connection string, PQconnectdbParams being smart enough to check * this itself. 
*/ #define PARAMS_ARRAY_SIZE 8 keywords = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*keywords)); values = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*values)); keywords[0] = "host"; values[0] = opts->hostname, keywords[1] = "port"; values[1] = opts->port; keywords[2] = "user"; values[2] = opts->username; keywords[3] = "password"; values[3] = password; keywords[4] = "dbname"; values[4] = opts->dbname; keywords[5] = "fallback_application_name"; values[5] = "pgstat"; keywords[7] = NULL; values[7] = NULL; my_conn = PQconnectdbParams(keywords, values, true); #else /* 34 is the length of the fallback application name setting */ size = 34; if (opts->hostname) size += strlen(opts->hostname) + 6; if (opts->port) size += strlen(opts->port) + 6; if (opts->username) size += strlen(opts->username) + 6; if (opts->dbname) size += strlen(opts->dbname) + 8; dns = pg_malloc(size); /* * Checking the presence of a = sign is our way to check that the * database name is actually a connection string. In such a case, we * keep this string as the connection string, and add other parameters * if they are supplied. 
*/ sprintf(dns, "%s", "fallback_application_name='pgstat' "); if (strchr(opts->dbname, '=') != NULL) sprintf(dns, "%s%s", dns, opts->dbname); else if (opts->dbname) sprintf(dns, "%sdbname=%s ", dns, opts->dbname); if (opts->hostname) sprintf(dns, "%shost=%s ", dns, opts->hostname); if (opts->port) sprintf(dns, "%sport=%s ", dns, opts->port); if (opts->username) sprintf(dns, "%suser=%s ", dns, opts->username); if (opts->verbose) printf("Connection string: %s\n", dns); my_conn = PQconnectdb(dns); #endif new_pass = false; if (!my_conn) { errx(1, "could not connect to database %s\n", opts->dbname); } #if PG_VERSION_NUM >= 80200 if (PQstatus(my_conn) == CONNECTION_BAD && PQconnectionNeedsPassword(my_conn) && !password) { PQfinish(my_conn); #if PG_VERSION_NUM < 100000 password = simple_prompt("Password: ", 100, false); #elif PG_VERSION_NUM < 140000 simple_prompt("Password: ", password, 100, false); #else password = simple_prompt("Password: ", false); #endif new_pass = true; } #endif } while (new_pass); if (password) free(password); /* check to see that the backend connection was successfully made */ if (PQstatus(my_conn) == CONNECTION_BAD) { message = PQerrorMessage(my_conn); PQfinish(my_conn); errx(1, "could not connect to database %s: %s", opts->dbname, message); } /* return the conn if good */ return my_conn; } /* * Dump all archiver stats. 
*/
void
print_pgstatarchiver()
{
	/*
	 * Owns the copy of the stats_reset timestamp between two calls.
	 *
	 * BUG FIX: the previous code stored the pointer returned by
	 * PQgetvalue() directly in previous_pgstatarchiver->stats_reset.
	 * That pointer refers to storage inside the PGresult, which is freed
	 * by PQclear() at the end of this function, so the next call built
	 * its query from freed memory (use-after-free / undefined behavior).
	 * We now copy the value into this static buffer before clearing.
	 */
	static char previous_stats_reset[PGSTAT_DEFAULT_STRING_SIZE];

	char        sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult   *res;
	int         nrows;
	int         row, column;

	long        archived_count;
	long        failed_count;
	char       *stats_reset;
	bool        has_been_reset;

	/*
	 * Grab the stats (pg_stat_archiver is a single-row view).  The last
	 * column tells us whether the stats have been reset since the
	 * previously recorded reset timestamp.
	 */
	snprintf(sql, sizeof(sql),
			 "SELECT archived_count, failed_count, stats_reset, stats_reset>'%s' "
			 "FROM pg_stat_archiver ",
			 previous_pgstatarchiver->stats_reset);

	/* make the call */
	res = PQexec(conn, sql);

	/* check and deal with errors (accept only EMPTY/COMMAND_OK/TUPLES_OK) */
	if (!res || PQresultStatus(res) > 2)
	{
		warnx("pgstat: query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		errx(1, "pgstat: query was: %s", sql);
	}

	/* get the number of rows (at most one for this view) */
	nrows = PQntuples(res);

	/* for each row, dump the information */
	for (row = 0; row < nrows; row++)
	{
		column = 0;

		/* getting new values */
		archived_count = atol(PQgetvalue(res, row, column++));
		failed_count = atol(PQgetvalue(res, row, column++));
		stats_reset = PQgetvalue(res, row, column++);
		/*
		 * A reset is only reported once we have a real previous timestamp
		 * (i.e. not the sentinel used on the very first iteration).
		 */
		has_been_reset = strcmp(PQgetvalue(res, row, column++), "f")
			&& strcmp(previous_pgstatarchiver->stats_reset, PGSTAT_OLDEST_STAT_RESET);

		if (has_been_reset)
		{
			(void)printf("pg_stat_archiver has been reset!\n");
		}

		/* printing the diff...
		 * note that the first line will be the current value, rather than the diff */
		(void)printf(" %6ld %6ld\n",
		    archived_count - previous_pgstatarchiver->archived_count,
		    failed_count - previous_pgstatarchiver->failed_count
		    );

		/* setting the new old value */
		previous_pgstatarchiver->archived_count = archived_count;
		previous_pgstatarchiver->failed_count = failed_count;
		/* copy the timestamp before PQclear() frees the memory it lives in */
		snprintf(previous_stats_reset, sizeof(previous_stats_reset),
				 "%s", stats_reset);
		previous_pgstatarchiver->stats_reset = previous_stats_reset;
	}

	/* cleanup */
	PQclear(res);
}

/*
 * Dump all bgwriter stats.
*/
void
print_pgstatbgwriter()
{
	/*
	 * Owns the copy of the stats_reset timestamp between two calls.
	 *
	 * BUG FIX: the previous code stored the pointer returned by
	 * PQgetvalue() directly in previous_pgstatbgwriter->stats_reset.
	 * That pointer refers to storage inside the PGresult, which is freed
	 * by PQclear() at the end of this function, so the next call built
	 * its query from freed memory (use-after-free / undefined behavior).
	 * We now copy the value into this static buffer before clearing.
	 */
	static char previous_stats_reset[PGSTAT_DEFAULT_STRING_SIZE];

	char        sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult   *res;
	int         nrows;
	int         row, column;

	/*
	 * Columns that only exist on newer servers keep their 0 initializer,
	 * so the printed diff is simply 0 on older versions.
	 */
	long        checkpoints_timed = 0;
	long        checkpoints_req = 0;
	long        checkpoint_write_time = 0;   /* 9.2+ */
	long        checkpoint_sync_time = 0;    /* 9.2+ */
	long        buffers_checkpoint = 0;
	long        buffers_clean = 0;
	long        maxwritten_clean = 0;
	long        buffers_backend = 0;
	long        buffers_backend_fsync = 0;   /* 9.1+ */
	long        buffers_alloc = 0;
	char       *stats_reset;
	bool        has_been_reset;

	/*
	 * Grab the stats (pg_stat_bgwriter is a single-row view).  Version-
	 * dependent columns are spliced into the query only when the server
	 * is new enough to have them.
	 */
	snprintf(sql, sizeof(sql),
			 "SELECT checkpoints_timed, checkpoints_req, %sbuffers_checkpoint, buffers_clean, "
			 "maxwritten_clean, buffers_backend, %sbuffers_alloc, stats_reset, stats_reset>'%s' "
			 "FROM pg_stat_bgwriter ",
			 backend_minimum_version(9, 2) ? "checkpoint_write_time, checkpoint_sync_time, " : "",
			 backend_minimum_version(9, 1) ? "buffers_backend_fsync, " : "",
			 previous_pgstatbgwriter->stats_reset);

	/* make the call */
	res = PQexec(conn, sql);

	/* check and deal with errors (accept only EMPTY/COMMAND_OK/TUPLES_OK) */
	if (!res || PQresultStatus(res) > 2)
	{
		warnx("pgstat: query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		errx(1, "pgstat: query was: %s", sql);
	}

	/* get the number of rows (at most one for this view) */
	nrows = PQntuples(res);

	/* for each row, dump the information */
	for (row = 0; row < nrows; row++)
	{
		column = 0;

		/* getting new values */
		checkpoints_timed = atol(PQgetvalue(res, row, column++));
		checkpoints_req = atol(PQgetvalue(res, row, column++));
		if (backend_minimum_version(9, 2))
		{
			checkpoint_write_time = atol(PQgetvalue(res, row, column++));
			checkpoint_sync_time = atol(PQgetvalue(res, row, column++));
		}
		buffers_checkpoint = atol(PQgetvalue(res, row, column++));
		buffers_clean = atol(PQgetvalue(res, row, column++));
		maxwritten_clean = atol(PQgetvalue(res, row, column++));
		buffers_backend = atol(PQgetvalue(res, row, column++));
		if (backend_minimum_version(9, 1))
		{
			buffers_backend_fsync = atol(PQgetvalue(res, row, column++));
		}
		buffers_alloc = atol(PQgetvalue(res, row, column++));
		stats_reset = PQgetvalue(res, row, column++);
		/*
		 * A reset is only reported once we have a real previous timestamp
		 * (i.e. not the sentinel used on the very first iteration).
		 */
		has_been_reset = strcmp(PQgetvalue(res, row, column++), "f")
			&& strcmp(previous_pgstatbgwriter->stats_reset, PGSTAT_OLDEST_STAT_RESET);

		if (has_been_reset)
		{
			(void)printf("pg_stat_bgwriter has been reset!\n");
		}

		/* printing the diff...
		 * note that the first line will be the current value, rather than the diff */
		(void)printf(" %6ld %6ld %6ld %6ld %6ld %6ld %6ld %6ld %4ld %2ld\n",
		    checkpoints_timed - previous_pgstatbgwriter->checkpoints_timed,
		    checkpoints_req - previous_pgstatbgwriter->checkpoints_req,
		    checkpoint_write_time - previous_pgstatbgwriter->checkpoint_write_time,
		    checkpoint_sync_time - previous_pgstatbgwriter->checkpoint_sync_time,
		    buffers_checkpoint - previous_pgstatbgwriter->buffers_checkpoint,
		    buffers_clean - previous_pgstatbgwriter->buffers_clean,
		    buffers_backend - previous_pgstatbgwriter->buffers_backend,
		    buffers_alloc - previous_pgstatbgwriter->buffers_alloc,
		    maxwritten_clean - previous_pgstatbgwriter->maxwritten_clean,
		    buffers_backend_fsync - previous_pgstatbgwriter->buffers_backend_fsync
		    );

		/* setting the new old value */
		previous_pgstatbgwriter->checkpoints_timed = checkpoints_timed;
		previous_pgstatbgwriter->checkpoints_req = checkpoints_req;
		previous_pgstatbgwriter->checkpoint_write_time = checkpoint_write_time;
		previous_pgstatbgwriter->checkpoint_sync_time = checkpoint_sync_time;
		previous_pgstatbgwriter->buffers_checkpoint = buffers_checkpoint;
		previous_pgstatbgwriter->buffers_clean = buffers_clean;
		previous_pgstatbgwriter->maxwritten_clean = maxwritten_clean;
		previous_pgstatbgwriter->buffers_backend = buffers_backend;
		previous_pgstatbgwriter->buffers_backend_fsync = buffers_backend_fsync;
		previous_pgstatbgwriter->buffers_alloc = buffers_alloc;
		/* copy the timestamp before PQclear() frees the memory it lives in */
		snprintf(previous_stats_reset, sizeof(previous_stats_reset),
				 "%s", stats_reset);
		previous_pgstatbgwriter->stats_reset = previous_stats_reset;
	}

	/* cleanup */
	PQclear(res);
}

/*
 * Dump all connection stats.
*/ void print_pgstatconnection() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row, column; long total = 0; long active = 0; long lockwaiting = 0; long idleintransaction = 0; long idle = 0; if (backend_minimum_version(10, 0)) { snprintf(sql, sizeof(sql), "SELECT count(*) AS total, " " sum(CASE WHEN state='active' AND wait_event IS NULL " "THEN 1 ELSE 0 END) AS active, " " sum(CASE WHEN state='active' AND wait_event IS NOT NULL " "THEN 1 ELSE 0 END) AS lockwaiting, " " sum(CASE WHEN state='idle in transaction' THEN 1 ELSE 0 END) AS idleintransaction, " " sum(CASE WHEN state='idle' THEN 1 ELSE 0 END) AS idle " "FROM pg_stat_activity " "WHERE backend_type='client backend'"); } else if (backend_minimum_version(9, 6)) { snprintf(sql, sizeof(sql), "SELECT count(*) AS total, " " sum(CASE WHEN state='active' AND wait_event IS NULL THEN 1 ELSE 0 END) AS active, " " sum(CASE WHEN state='active' AND wait_event IS NOT NULL THEN 1 ELSE 0 END) AS lockwaiting, " " sum(CASE WHEN state='idle in transaction' THEN 1 ELSE 0 END) AS idleintransaction, " " sum(CASE WHEN state='idle' THEN 1 ELSE 0 END) AS idle " "FROM pg_stat_activity"); } else { snprintf(sql, sizeof(sql), "SELECT count(*) AS total, " " sum(CASE WHEN state='active' AND NOT waiting THEN 1 ELSE 0 END) AS active, " " sum(CASE WHEN waiting THEN 1 ELSE 0 END) AS lockwaiting, " " sum(CASE WHEN state='idle in transaction' THEN 1 ELSE 0 END) AS idleintransaction, " " sum(CASE WHEN state='idle' THEN 1 ELSE 0 END) AS idle " "FROM pg_stat_activity"); } res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { column = 0; total = 
atol(PQgetvalue(res, row, column++)); active = atol(PQgetvalue(res, row, column++)); lockwaiting = atol(PQgetvalue(res, row, column++)); idleintransaction = atol(PQgetvalue(res, row, column++)); idle = atol(PQgetvalue(res, row, column++)); /* printing the actual values for once */ (void)printf(" %4ld %4ld %4ld %4ld %4ld \n", total, active, lockwaiting, idleintransaction, idle); } /* cleanup */ PQclear(res); } /* * Dump all bgwriter stats. */ void print_pgstatdatabase() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; const char *paramValues[1]; int nrows; int row, column; long numbackends = 0; long xact_commit = 0; long xact_rollback = 0; long blks_read = 0; long blks_hit = 0; long tup_returned = 0; long tup_fetched = 0; long tup_inserted = 0; long tup_updated = 0; long tup_deleted = 0; long conflicts = 0; long temp_files = 0; long temp_bytes = 0; long deadlocks = 0; long checksum_failures = 0; float blk_read_time = 0; float blk_write_time = 0; float session_time = 0; float active_time = 0; float idle_in_transaction_time = 0; long sessions = 0; long sessions_abandoned = 0; long sessions_fatal = 0; long sessions_killed = 0; char *stats_reset; bool has_been_reset; /* * With a filter, we assume we'll get only one row. * Without, we sum all the fields to get one row. */ if (opts->filter == NULL) { snprintf(sql, sizeof(sql), "SELECT sum(numbackends), sum(xact_commit), sum(xact_rollback), sum(blks_read), sum(blks_hit)" ", max(stats_reset), max(stats_reset)>'%s'" "%s%s%s%s%s " "FROM pg_stat_database ", previous_pgstatdatabase->stats_reset, backend_minimum_version(8, 3) ? ", sum(tup_returned), sum(tup_fetched), sum(tup_inserted), sum(tup_updated), sum(tup_deleted)" : "", backend_minimum_version(9, 1) ? ", sum(conflicts)" : "", backend_minimum_version(9, 2) ? ", sum(temp_files), sum(temp_bytes), sum(deadlocks), sum(blk_read_time), sum(blk_write_time)" : "", backend_minimum_version(12, 0) ? ", sum(checksum_failures)" : "", backend_minimum_version(14, 0) ? 
", sum(session_time), sum(active_time), sum(idle_in_transaction_time), sum(sessions), sum(sessions_abandoned), sum(sessions_fatal), sum(sessions_killed)" : ""); res = PQexec(conn, sql); } else { snprintf(sql, sizeof(sql), "SELECT numbackends, xact_commit, xact_rollback, blks_read, blks_hit" ", stats_reset, stats_reset>'%s'" "%s%s%s%s%s " "FROM pg_stat_database " "WHERE datname=$1", previous_pgstatdatabase->stats_reset, backend_minimum_version(8, 3) ? ", tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted" : "", backend_minimum_version(9, 1) ? ", conflicts" : "", backend_minimum_version(9, 2) ? ", temp_files, temp_bytes, deadlocks, blk_read_time, blk_write_time" : "", backend_minimum_version(12, 0) ? ", checksum_failures" : "", backend_minimum_version(14, 0) ? ", session_time, active_time, idle_in_transaction_time, sessions, sessions_abandoned, sessions_fatal, sessions_killed" : ""); paramValues[0] = pg_strdup(opts->filter); res = PQexecParams(conn, sql, 1, /* one param */ NULL, /* let the backend deduce param type */ paramValues, NULL, /* don't need param lengths since text */ NULL, /* default to all text params */ 0); /* ask for text results */ } /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { column = 0; /* getting new values */ numbackends = atol(PQgetvalue(res, row, column++)); xact_commit = atol(PQgetvalue(res, row, column++)); xact_rollback = atol(PQgetvalue(res, row, column++)); blks_read = atol(PQgetvalue(res, row, column++)); blks_hit = atol(PQgetvalue(res, row, column++)); stats_reset = PQgetvalue(res, row, column++); has_been_reset = strcmp(PQgetvalue(res, row, column++), "f") 
&& strcmp(previous_pgstatdatabase->stats_reset, PGSTAT_OLDEST_STAT_RESET); if (backend_minimum_version(8, 3)) { tup_returned = atol(PQgetvalue(res, row, column++)); tup_fetched = atol(PQgetvalue(res, row, column++)); tup_inserted = atol(PQgetvalue(res, row, column++)); tup_updated = atol(PQgetvalue(res, row, column++)); tup_deleted = atol(PQgetvalue(res, row, column++)); } if (backend_minimum_version(9, 1)) { conflicts = atol(PQgetvalue(res, row, column++)); } if (backend_minimum_version(9, 2)) { temp_files = atol(PQgetvalue(res, row, column++)); temp_bytes = atol(PQgetvalue(res, row, column++)); deadlocks = atol(PQgetvalue(res, row, column++)); blk_read_time = atof(PQgetvalue(res, row, column++)); blk_write_time = atof(PQgetvalue(res, row, column++)); } if (backend_minimum_version(12, 0)) { checksum_failures = atol(PQgetvalue(res, row, column++)); } if (backend_minimum_version(14, 0)) { session_time = atof(PQgetvalue(res, row, column++)); active_time = atof(PQgetvalue(res, row, column++)); idle_in_transaction_time = atof(PQgetvalue(res, row, column++)); sessions = atol(PQgetvalue(res, row, column++)); sessions_abandoned = atol(PQgetvalue(res, row, column++)); sessions_fatal = atol(PQgetvalue(res, row, column++)); sessions_killed = atol(PQgetvalue(res, row, column++)); } if (has_been_reset) { (void)printf("pg_stat_database has been reset!\n"); } /* printing the diff... 
* note that the first line will be the current value, rather than the diff */ (void)printf(" %4ld %6ld %6ld %6ld %6ld %5.2f %5.2f %6ld %6ld %6ld %6ld %6ld %6ld %9ld %8.2f %8.2f %8.2f %6ld %6ld %6ld %6ld %9ld %9ld %9ld\n", numbackends, xact_commit - previous_pgstatdatabase->xact_commit, xact_rollback - previous_pgstatdatabase->xact_rollback, blks_read - previous_pgstatdatabase->blks_read, blks_hit - previous_pgstatdatabase->blks_hit, blk_read_time - previous_pgstatdatabase->blk_read_time, blk_write_time - previous_pgstatdatabase->blk_write_time, tup_returned - previous_pgstatdatabase->tup_returned, tup_fetched - previous_pgstatdatabase->tup_fetched, tup_inserted - previous_pgstatdatabase->tup_inserted, tup_updated - previous_pgstatdatabase->tup_updated, tup_deleted - previous_pgstatdatabase->tup_deleted, temp_files - previous_pgstatdatabase->temp_files, temp_bytes - previous_pgstatdatabase->temp_bytes, session_time - previous_pgstatdatabase->session_time, active_time - previous_pgstatdatabase->active_time, idle_in_transaction_time - previous_pgstatdatabase->idle_in_transaction_time, sessions - previous_pgstatdatabase->sessions, sessions_abandoned - previous_pgstatdatabase->sessions_abandoned, sessions_fatal - previous_pgstatdatabase->sessions_fatal, sessions_killed - previous_pgstatdatabase->sessions_killed, conflicts - previous_pgstatdatabase->conflicts, deadlocks - previous_pgstatdatabase->deadlocks, checksum_failures - previous_pgstatdatabase->checksum_failures ); /* setting the new old value */ previous_pgstatdatabase->xact_commit = xact_commit; previous_pgstatdatabase->xact_rollback = xact_rollback; previous_pgstatdatabase->blks_read = blks_read; previous_pgstatdatabase->blks_hit = blks_hit; previous_pgstatdatabase->tup_returned = tup_returned; previous_pgstatdatabase->tup_fetched = tup_fetched; previous_pgstatdatabase->tup_inserted = tup_inserted; previous_pgstatdatabase->tup_updated = tup_updated; previous_pgstatdatabase->tup_deleted = tup_deleted; 
previous_pgstatdatabase->conflicts = conflicts; previous_pgstatdatabase->temp_files = temp_files; previous_pgstatdatabase->temp_bytes = temp_bytes; previous_pgstatdatabase->deadlocks = deadlocks; previous_pgstatdatabase->blk_read_time = blk_read_time; previous_pgstatdatabase->blk_write_time = blk_write_time; previous_pgstatdatabase->checksum_failures = checksum_failures; previous_pgstatdatabase->session_time = session_time; previous_pgstatdatabase->active_time = active_time; previous_pgstatdatabase->idle_in_transaction_time = idle_in_transaction_time; previous_pgstatdatabase->sessions = sessions; previous_pgstatdatabase->sessions_abandoned = sessions_abandoned; previous_pgstatdatabase->sessions_fatal = sessions_fatal; previous_pgstatdatabase->sessions_killed = sessions_killed; if (strlen(stats_reset) == 0) previous_pgstatdatabase->stats_reset = PGSTAT_OLDEST_STAT_RESET; else previous_pgstatdatabase->stats_reset = stats_reset; } /* cleanup */ PQclear(res); } /* * Dump all table stats. */ void print_pgstattable() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; const char *paramValues[1]; int nrows; int row, column; long seq_scan = 0; long seq_tup_read = 0; long idx_scan = 0; long idx_tup_fetch = 0; long n_tup_ins = 0; long n_tup_upd = 0; long n_tup_del = 0; long n_tup_hot_upd = 0; long n_live_tup = 0; long n_dead_tup = 0; long n_mod_since_analyze = 0; long n_ins_since_vacuum = 0; long vacuum_count = 0; long autovacuum_count = 0; long analyze_count = 0; long autoanalyze_count = 0; /* * With a filter, we assume we'll get only one row. * Without, we sum all the fields to get one row. */ if (opts->filter == NULL) { snprintf(sql, sizeof(sql), "SELECT sum(seq_scan), sum(seq_tup_read), sum(idx_scan), sum(idx_tup_fetch), sum(n_tup_ins), " "sum(n_tup_upd), sum(n_tup_del)" "%s" "%s" "%s" "%s" " FROM pg_stat_all_tables " "WHERE schemaname <> 'information_schema' ", backend_minimum_version(8, 3) ? 
", sum(n_tup_hot_upd), sum(n_live_tup), sum(n_dead_tup)" : "", backend_minimum_version(9, 4) ? ", sum(n_mod_since_analyze)" : "", backend_minimum_version(13, 0) ? ", sum(n_ins_since_vacuum)" : "", backend_minimum_version(9, 1) ? ", sum(vacuum_count), sum(autovacuum_count), sum(analyze_count), sum(autoanalyze_count)" : ""); res = PQexec(conn, sql); } else { snprintf(sql, sizeof(sql), "SELECT sum(seq_scan), sum(seq_tup_read), sum(idx_scan), sum(idx_tup_fetch), sum(n_tup_ins), " "sum(n_tup_upd), sum(n_tup_del)" "%s" "%s" "%s" "%s" " FROM pg_stat_all_tables " "WHERE schemaname <> 'information_schema' " " AND relname = $1", backend_minimum_version(8, 3) ? ", sum(n_tup_hot_upd), sum(n_live_tup), sum(n_dead_tup)" : "", backend_minimum_version(9, 4) ? ", sum(n_mod_since_analyze)" : "", backend_minimum_version(13, 0) ? ", sum(n_ins_since_vacuum)" : "", backend_minimum_version(9, 1) ? ", sum(vacuum_count), sum(autovacuum_count), sum(analyze_count), sum(autoanalyze_count)" : ""); paramValues[0] = pg_strdup(opts->filter); res = PQexecParams(conn, sql, 1, /* one param */ NULL, /* let the backend deduce param type */ paramValues, NULL, /* don't need param lengths since text */ NULL, /* default to all text params */ 0); /* ask for text results */ } /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { column = 0; /* getting new values */ seq_scan = atol(PQgetvalue(res, row, column++)); seq_tup_read = atol(PQgetvalue(res, row, column++)); idx_scan = atol(PQgetvalue(res, row, column++)); idx_tup_fetch = atol(PQgetvalue(res, row, column++)); n_tup_ins = atol(PQgetvalue(res, row, column++)); n_tup_upd = atol(PQgetvalue(res, 
row, column++)); n_tup_del = atol(PQgetvalue(res, row, column++)); if (backend_minimum_version(8, 3)) { n_tup_hot_upd = atol(PQgetvalue(res, row, column++)); n_live_tup = atol(PQgetvalue(res, row, column++)); n_dead_tup = atol(PQgetvalue(res, row, column++)); } if (backend_minimum_version(9, 4)) { n_mod_since_analyze = atol(PQgetvalue(res, row, column++)); } if (backend_minimum_version(13, 0)) { n_ins_since_vacuum = atol(PQgetvalue(res, row, column++)); } if (backend_minimum_version(9, 1)) { vacuum_count = atol(PQgetvalue(res, row, column++)); autovacuum_count = atol(PQgetvalue(res, row, column++)); analyze_count = atol(PQgetvalue(res, row, column++)); autoanalyze_count = atol(PQgetvalue(res, row, column++)); } /* printing the diff... note that the first line will be the current value, rather than the diff */ (void)printf(" %6ld %6ld %6ld %6ld %6ld %6ld %6ld %6ld %6ld %6ld %6ld %6ld %6ld %6ld %6ld %6ld\n", seq_scan - previous_pgstattable->seq_scan, seq_tup_read - previous_pgstattable->seq_tup_read, idx_scan - previous_pgstattable->idx_scan, idx_tup_fetch - previous_pgstattable->idx_tup_fetch, n_tup_ins - previous_pgstattable->n_tup_ins, n_tup_upd - previous_pgstattable->n_tup_upd, n_tup_del - previous_pgstattable->n_tup_del, n_tup_hot_upd - previous_pgstattable->n_tup_hot_upd, n_live_tup - previous_pgstattable->n_live_tup, n_dead_tup - previous_pgstattable->n_dead_tup, n_mod_since_analyze - previous_pgstattable->n_mod_since_analyze, n_ins_since_vacuum - previous_pgstattable->n_ins_since_vacuum, vacuum_count - previous_pgstattable->vacuum_count, autovacuum_count - previous_pgstattable->autovacuum_count, analyze_count - previous_pgstattable->analyze_count, autoanalyze_count - previous_pgstattable->autoanalyze_count ); /* setting the new old value */ previous_pgstattable->seq_scan = seq_scan; previous_pgstattable->seq_tup_read = seq_tup_read; previous_pgstattable->idx_scan = idx_scan; previous_pgstattable->idx_tup_fetch = idx_tup_fetch; previous_pgstattable->n_tup_ins 
= n_tup_ins; previous_pgstattable->n_tup_upd = n_tup_upd; previous_pgstattable->n_tup_del = n_tup_del; previous_pgstattable->n_tup_hot_upd = n_tup_hot_upd; previous_pgstattable->n_live_tup = n_live_tup; previous_pgstattable->n_dead_tup = n_dead_tup; previous_pgstattable->n_mod_since_analyze = n_mod_since_analyze; previous_pgstattable->n_ins_since_vacuum = n_ins_since_vacuum; previous_pgstattable->vacuum_count = vacuum_count; previous_pgstattable->autovacuum_count = autovacuum_count; previous_pgstattable->analyze_count = analyze_count; previous_pgstattable->autoanalyze_count = autoanalyze_count; } /* cleanup */ PQclear(res); } /* * Dump all table IO stats. */ void print_pgstattableio() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; const char *paramValues[1]; int nrows; int row, column; long heap_blks_read = 0; long heap_blks_hit = 0; long idx_blks_read = 0; long idx_blks_hit = 0; long toast_blks_read = 0; long toast_blks_hit = 0; long tidx_blks_read = 0; long tidx_blks_hit = 0; /* * With a filter, we assume we'll get only one row. * Without, we sum all the fields to get one row. 
*/ if (opts->filter == NULL) { snprintf(sql, sizeof(sql), "SELECT sum(heap_blks_read), sum(heap_blks_hit), sum(idx_blks_read), sum(idx_blks_hit), " "sum(toast_blks_read), sum(toast_blks_hit), sum(tidx_blks_read), sum(tidx_blks_hit) " "FROM pg_statio_all_tables " "WHERE schemaname <> 'information_schema' "); res = PQexec(conn, sql); } else { snprintf(sql, sizeof(sql), "SELECT heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit, " "toast_blks_read, toast_blks_hit, tidx_blks_read, tidx_blks_hit " "FROM pg_statio_all_tables " "WHERE schemaname <> 'information_schema' " " AND relname = $1"); paramValues[0] = pg_strdup(opts->filter); res = PQexecParams(conn, sql, 1, /* one param */ NULL, /* let the backend deduce param type */ paramValues, NULL, /* don't need param lengths since text */ NULL, /* default to all text params */ 0); /* ask for text results */ } /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { column = 0; /* getting new values */ heap_blks_read = atol(PQgetvalue(res, row, column++)); heap_blks_hit = atol(PQgetvalue(res, row, column++)); idx_blks_read = atol(PQgetvalue(res, row, column++)); idx_blks_hit = atol(PQgetvalue(res, row, column++)); toast_blks_read = atol(PQgetvalue(res, row, column++)); toast_blks_hit = atol(PQgetvalue(res, row, column++)); tidx_blks_read = atol(PQgetvalue(res, row, column++)); tidx_blks_hit = atol(PQgetvalue(res, row, column++)); /* printing the diff... 
* note that the first line will be the current value, rather than the diff */ (void)printf(" %6ld %6ld %7ld %7ld %7ld %7ld %9ld %9ld\n", heap_blks_read - previous_pgstattableio->heap_blks_read, heap_blks_hit - previous_pgstattableio->heap_blks_hit, idx_blks_read - previous_pgstattableio->idx_blks_read, idx_blks_hit - previous_pgstattableio->idx_blks_hit, toast_blks_read - previous_pgstattableio->toast_blks_read, toast_blks_hit - previous_pgstattableio->toast_blks_hit, tidx_blks_read - previous_pgstattableio->tidx_blks_read, tidx_blks_hit - previous_pgstattableio->tidx_blks_hit ); /* setting the new old value */ previous_pgstattableio->heap_blks_read = heap_blks_read; previous_pgstattableio->heap_blks_hit = heap_blks_hit; previous_pgstattableio->idx_blks_read = idx_blks_read; previous_pgstattableio->idx_blks_hit = idx_blks_hit; previous_pgstattableio->toast_blks_read = toast_blks_read; previous_pgstattableio->toast_blks_hit = toast_blks_hit; previous_pgstattableio->tidx_blks_read = tidx_blks_read; previous_pgstattableio->tidx_blks_hit = tidx_blks_hit; } /* cleanup */ PQclear(res); } /* * Dump all index stats. */ void print_pgstatindex() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; const char *paramValues[1]; int nrows; int row, column; long idx_scan = 0; long idx_tup_read = 0; long idx_tup_fetch = 0; /* * With a filter, we assume we'll get only one row. * Without, we sum all the fields to get one row. 
*/ if (opts->filter == NULL) { snprintf(sql, sizeof(sql), "SELECT sum(idx_scan), sum(idx_tup_read), sum(idx_tup_fetch) " " FROM pg_stat_all_indexes " "WHERE schemaname <> 'information_schema' "); res = PQexec(conn, sql); } else { snprintf(sql, sizeof(sql), "SELECT idx_scan, idx_tup_read, idx_tup_fetch " " FROM pg_stat_all_indexes " "WHERE schemaname <> 'information_schema' " " AND indexrelname = $1"); paramValues[0] = pg_strdup(opts->filter); res = PQexecParams(conn, sql, 1, /* one param */ NULL, /* let the backend deduce param type */ paramValues, NULL, /* don't need param lengths since text */ NULL, /* default to all text params */ 0); /* ask for text results */ } /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { column = 0; /* getting new values */ idx_scan = atol(PQgetvalue(res, row, column++)); idx_tup_read = atof(PQgetvalue(res, row, column++)); idx_tup_fetch = atof(PQgetvalue(res, row, column++)); /* printing the diff... * note that the first line will be the current value, rather than the diff */ (void)printf(" %8ld %7ld %7ld\n", idx_scan - previous_pgstatindex->idx_scan, idx_tup_read - previous_pgstatindex->idx_tup_read, idx_tup_fetch - previous_pgstatindex->idx_tup_fetch ); /* setting the new old value */ previous_pgstatindex->idx_scan = idx_scan; previous_pgstatindex->idx_tup_read = idx_tup_read; previous_pgstatindex->idx_tup_fetch = idx_tup_fetch; } /* cleanup */ PQclear(res); } /* * Dump all function stats. 
 */
void
print_pgstatfunction()
{
	char	 sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult *res;
	const char *paramValues[1];
	int		 nrows;
	int		 row, column;

	long  calls = 0;
	float total_time = 0;
	float self_time = 0;

	/*
	 * With a filter, we assume we'll get only one row.
	 * Without, we sum all the fields to get one row.
	 */
	if (opts->filter == NULL)
	{
		snprintf(sql, sizeof(sql),
			"SELECT sum(calls), sum(total_time), sum(self_time) "
			" FROM pg_stat_user_functions "
			"WHERE schemaname <> 'information_schema' ");
		res = PQexec(conn, sql);
	}
	else
	{
		snprintf(sql, sizeof(sql),
			"SELECT calls, total_time, self_time "
			" FROM pg_stat_user_functions "
			"WHERE schemaname <> 'information_schema' "
			"  AND funcname = $1");
		/* NOTE(review): pg_strdup result is never freed (leaks per call) */
		paramValues[0] = pg_strdup(opts->filter);

		res = PQexecParams(conn, sql,
			1,      /* one param */
			NULL,   /* let the backend deduce param type */
			paramValues,
			NULL,   /* don't need param lengths since text */
			NULL,   /* default to all text params */
			0);     /* ask for text results */
	}

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		warnx("pgstat: query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		errx(1, "pgstat: query was: %s", sql);
	}

	/* get the number of rows returned */
	nrows = PQntuples(res);

	/* for each row, dump the information */
	/* this is stupid, a simple if would do the trick, but it will help for other cases */
	for (row = 0; row < nrows; row++)
	{
		column = 0;

		/* getting new values */
		calls = atol(PQgetvalue(res, row, column++));
		total_time = atof(PQgetvalue(res, row, column++));
		self_time = atof(PQgetvalue(res, row, column++));

		/* printing the diff...
		 * note that the first line will be the current value, rather than the diff */
		(void)printf(" %9ld %5f %5f\n",
			calls - previous_pgstatfunction->calls,
			total_time - previous_pgstatfunction->total_time,
			self_time - previous_pgstatfunction->self_time
			);

		/* setting the new old value */
		previous_pgstatfunction->calls = calls;
		previous_pgstatfunction->total_time = total_time;
		previous_pgstatfunction->self_time = self_time;
	}

	/* cleanup */
	PQclear(res);
}

/*
 * Dump all statement stats.
 *
 * Reads the pg_stat_statements extension view (in the schema given by
 * opts->namespace) and prints per-interval deltas, summed over all
 * statements or restricted to a single queryid (opts->filter).
 * The plans/total_plan_time and wal_* columns only exist on 13+; before 13
 * the execution-time column is named total_time instead of total_exec_time.
 */
void
print_pgstatstatement()
{
	char	 sql[PGSTAT_DEFAULT_STRING_SIZE];
	const char *paramValues[1];
	PGresult *res;
	int		 nrows;
	int		 row, column;

	long  plans = 0;
	float total_plan_time = 0;
	long  calls = 0;
	float total_exec_time = 0;
	long  rows = 0;
	long  shared_blks_hit = 0;
	long  shared_blks_read = 0;
	long  shared_blks_dirtied = 0;
	long  shared_blks_written = 0;
	long  local_blks_hit = 0;
	long  local_blks_read = 0;
	long  local_blks_dirtied = 0;
	long  local_blks_written = 0;
	long  temp_blks_read = 0;
	long  temp_blks_written = 0;
	float blk_read_time = 0;
	float blk_write_time = 0;
	long  wal_records = 0;
	long  wal_fpi = 0;
	long  wal_bytes = 0;

	if (opts->filter == NULL)
	{
		snprintf(sql, sizeof(sql),
			"SELECT %ssum(calls), sum(%s), sum(rows),"
			" sum(shared_blks_hit), sum(shared_blks_read), sum(shared_blks_dirtied), sum(shared_blks_written),"
			" sum(local_blks_hit), sum(local_blks_read), sum(local_blks_dirtied), sum(local_blks_written),"
			" sum(temp_blks_read), sum(temp_blks_written),"
			" sum(blk_read_time), sum(blk_write_time)"
			"%s"
			" FROM %s.pg_stat_statements ",
			backend_minimum_version(13, 0) ? "sum(plans), sum(total_plan_time), " : "",
			backend_minimum_version(13, 0) ? "total_exec_time" : "total_time",
			backend_minimum_version(13, 0) ? ", sum(wal_records), sum(wal_fpi), sum(wal_bytes)" : "",
			opts->namespace);
		res = PQexec(conn, sql);
	}
	else
	{
		snprintf(sql, sizeof(sql),
			"SELECT %scalls, %s, rows,"
			" shared_blks_hit, shared_blks_read, shared_blks_dirtied, shared_blks_written,"
			" local_blks_hit, local_blks_read, local_blks_dirtied, local_blks_written,"
			" temp_blks_read, temp_blks_written,"
			" blk_read_time, blk_write_time"
			"%s"
			" FROM %s.pg_stat_statements "
			"WHERE queryid=$1",
			backend_minimum_version(13, 0) ? "plans, total_plan_time, " : "",
			backend_minimum_version(13, 0) ? "total_exec_time" : "total_time",
			backend_minimum_version(13, 0) ? ", wal_records, wal_fpi, wal_bytes" : "",
			opts->namespace);
		/* NOTE(review): pg_strdup result is never freed (leaks per call) */
		paramValues[0] = pg_strdup(opts->filter);

		res = PQexecParams(conn, sql,
			1,      /* one param */
			NULL,   /* let the backend deduce param type */
			paramValues,
			NULL,   /* don't need param lengths since text */
			NULL,   /* default to all text params */
			0);     /* ask for text results */
	}

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		warnx("pgstat: query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		errx(1, "pgstat: query was: %s", sql);
	}

	/* get the number of rows returned */
	nrows = PQntuples(res);

	/* for each row, dump the information */
	/* this is stupid, a simple if would do the trick, but it will help for other cases */
	for (row = 0; row < nrows; row++)
	{
		column = 0;

		/* getting new values — order must match the SELECT list, with the
		 * 13+-only columns read conditionally */
		if (backend_minimum_version(13, 0))
		{
			plans = atol(PQgetvalue(res, row, column++));
			total_plan_time = atof(PQgetvalue(res, row, column++));
		}
		calls = atol(PQgetvalue(res, row, column++));
		total_exec_time = atof(PQgetvalue(res, row, column++));
		rows = atol(PQgetvalue(res, row, column++));
		shared_blks_hit = atol(PQgetvalue(res, row, column++));
		shared_blks_read = atol(PQgetvalue(res, row, column++));
		shared_blks_dirtied = atol(PQgetvalue(res, row, column++));
		shared_blks_written = atol(PQgetvalue(res, row, column++));
		local_blks_hit = atol(PQgetvalue(res, row, column++));
		local_blks_read = atol(PQgetvalue(res, row, column++));
		local_blks_dirtied = atol(PQgetvalue(res, row, column++));
		local_blks_written = atol(PQgetvalue(res, row, column++));
		temp_blks_read = atol(PQgetvalue(res, row, column++));
		temp_blks_written = atol(PQgetvalue(res, row, column++));
		blk_read_time = atof(PQgetvalue(res, row, column++));
		blk_write_time = atof(PQgetvalue(res, row, column++));
		if (backend_minimum_version(13, 0))
		{
			wal_records = atol(PQgetvalue(res, row, column++));
			wal_fpi = atol(PQgetvalue(res, row, column++));
			wal_bytes = atol(PQgetvalue(res, row, column++));
		}

		/* printing the diff...
		 * note that the first line will be the current value, rather than the diff */
		(void)printf(" %6ld %6.2f %6ld %6.2f %6ld %6ld %6ld %6ld %6ld %6ld %6ld %6ld %6ld %6ld %6ld %6.2f %6.2f %6ld %6ld %6ld\n",
			plans - previous_pgstatstatement->plans,
			total_plan_time - previous_pgstatstatement->total_plan_time,
			calls - previous_pgstatstatement->calls,
			total_exec_time - previous_pgstatstatement->total_exec_time,
			rows - previous_pgstatstatement->rows,
			shared_blks_hit - previous_pgstatstatement->shared_blks_hit,
			shared_blks_read - previous_pgstatstatement->shared_blks_read,
			shared_blks_dirtied - previous_pgstatstatement->shared_blks_dirtied,
			shared_blks_written - previous_pgstatstatement->shared_blks_written,
			local_blks_hit - previous_pgstatstatement->local_blks_hit,
			local_blks_read - previous_pgstatstatement->local_blks_read,
			local_blks_dirtied - previous_pgstatstatement->local_blks_dirtied,
			local_blks_written - previous_pgstatstatement->local_blks_written,
			temp_blks_read - previous_pgstatstatement->temp_blks_read,
			temp_blks_written - previous_pgstatstatement->temp_blks_written,
			blk_read_time - previous_pgstatstatement->blk_read_time,
			blk_write_time - previous_pgstatstatement->blk_write_time,
			wal_records - previous_pgstatstatement->wal_records,
			wal_fpi - previous_pgstatstatement->wal_fpi,
			wal_bytes - previous_pgstatstatement->wal_bytes
			);

		/* setting the new old value */
		previous_pgstatstatement->plans = plans;
		previous_pgstatstatement->total_plan_time = total_plan_time;
		previous_pgstatstatement->calls = calls;
		previous_pgstatstatement->total_exec_time = total_exec_time;
		previous_pgstatstatement->rows = rows;
		previous_pgstatstatement->shared_blks_hit = shared_blks_hit;
		previous_pgstatstatement->shared_blks_read = shared_blks_read;
		previous_pgstatstatement->shared_blks_dirtied = shared_blks_dirtied;
		previous_pgstatstatement->shared_blks_written = shared_blks_written;
		previous_pgstatstatement->local_blks_hit = local_blks_hit;
		previous_pgstatstatement->local_blks_read = local_blks_read;
		previous_pgstatstatement->local_blks_dirtied = local_blks_dirtied;
		previous_pgstatstatement->local_blks_written = local_blks_written;
		previous_pgstatstatement->temp_blks_read = temp_blks_read;
		previous_pgstatstatement->temp_blks_written = temp_blks_written;
		previous_pgstatstatement->blk_read_time = blk_read_time;
		previous_pgstatstatement->blk_write_time = blk_write_time;
		previous_pgstatstatement->wal_records = wal_records;
		previous_pgstatstatement->wal_fpi = wal_fpi;
		previous_pgstatstatement->wal_bytes = wal_bytes;
	};

	/* cleanup */
	PQclear(res);
}

/*
 * Dump all SLRU stats.
 *
 * Prints per-interval deltas from pg_stat_slru (PostgreSQL 13+), summed
 * across all SLRU caches, or for the single cache named in opts->filter.
 */
void
print_pgstatslru()
{
	char	 sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult *res;
	const char *paramValues[1];
	int		 nrows;
	int		 row, column;

	long  blks_zeroed = 0;
	long  blks_hit = 0;
	long  blks_read = 0;
	long  blks_written = 0;
	long  blks_exists = 0;
	long  flushes = 0;
	long  truncates = 0;
	char *stats_reset;
	bool  has_been_reset;

	/*
	 * With a filter, we assume we'll get only one row.
	 * Without, we sum all the fields to get one row.
	 */
	if (opts->filter == NULL)
	{
		snprintf(sql, sizeof(sql),
			"SELECT sum(blks_zeroed), sum(blks_hit), sum(blks_read), sum(blks_written), "
			"sum(blks_exists), sum(flushes), sum(truncates), "
			"max(stats_reset), max(stats_reset)>'%s' "
			"FROM pg_stat_slru ",
			previous_pgstatslru->stats_reset);
		res = PQexec(conn, sql);
	}
	else
	{
		/* NOTE(review): this query mixes sum() aggregates with the bare
		 * stats_reset column and has no GROUP BY — PostgreSQL rejects that
		 * ("column must appear in the GROUP BY clause..."). Since WHERE
		 * name = $1 selects a single row, the sum() calls should be dropped
		 * (as in print_pgstattableio) or stats_reset wrapped in max(). */
		snprintf(sql, sizeof(sql),
			"SELECT sum(blks_zeroed), sum(blks_hit), sum(blks_read), sum(blks_written), "
			"sum(blks_exists), sum(flushes), sum(truncates), "
			"stats_reset, stats_reset>'%s' "
			"FROM pg_stat_slru "
			"WHERE name = $1",
			previous_pgstatslru->stats_reset);
		/* NOTE(review): pg_strdup result is never freed (leaks per call) */
		paramValues[0] = pg_strdup(opts->filter);

		res = PQexecParams(conn, sql,
			1,      /* one param */
			NULL,   /* let the backend deduce param type */
			paramValues,
			NULL,   /* don't need param lengths since text */
			NULL,   /* default to all text params */
			0);     /* ask for text results */
	}

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		warnx("pgstat: query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		errx(1, "pgstat: query was: %s", sql);
	}

	/* get the number of rows returned */
	nrows = PQntuples(res);

	/* for each row, dump the information */
	/* this is stupid, a simple if would do the trick, but it will help for other cases */
	for (row = 0; row < nrows; row++)
	{
		column = 0;

		/* getting new values */
		blks_zeroed = atol(PQgetvalue(res, row, column++));
		blks_hit = atol(PQgetvalue(res, row, column++));
		blks_read = atol(PQgetvalue(res, row, column++));
		blks_written = atol(PQgetvalue(res, row, column++));
		blks_exists = atol(PQgetvalue(res, row, column++));
		flushes = atol(PQgetvalue(res, row, column++));
		truncates = atol(PQgetvalue(res, row, column++));
		stats_reset = PQgetvalue(res, row, column++);
		has_been_reset = strcmp(PQgetvalue(res, row, column++), "f")
			&& strcmp(previous_pgstatslru->stats_reset, PGSTAT_OLDEST_STAT_RESET);

		if (has_been_reset)
		{
			(void)printf("pg_stat_slru has been reset!\n");
		}

		/* printing the diff...
		   note that the first line will be the current value, rather than the diff */
		(void)printf(" %6ld %6ld %6ld %6ld %6ld %6ld %6ld\n",
			blks_zeroed - previous_pgstatslru->blks_zeroed,
			blks_hit - previous_pgstatslru->blks_hit,
			blks_read - previous_pgstatslru->blks_read,
			blks_written - previous_pgstatslru->blks_written,
			blks_exists - previous_pgstatslru->blks_exists,
			flushes - previous_pgstatslru->flushes,
			truncates - previous_pgstatslru->truncates
			);

		/* setting the new old value */
		previous_pgstatslru->blks_zeroed = blks_zeroed;
		previous_pgstatslru->blks_hit = blks_hit;
		previous_pgstatslru->blks_read = blks_read;
		previous_pgstatslru->blks_written = blks_written;
		previous_pgstatslru->blks_exists = blks_exists;
		previous_pgstatslru->flushes = flushes;
		previous_pgstatslru->truncates = truncates;
		previous_pgstatslru->stats_reset = stats_reset;
	}

	/* cleanup */
	PQclear(res);
}

/*
 * Dump all wal stats.
 *
 * Prints per-interval deltas of the single-row pg_stat_wal view
 * (PostgreSQL 14+). Detects and announces a stats reset.
 */
void
print_pgstatwal()
{
	char	 sql[PGSTAT_DEFAULT_STRING_SIZE];
	PGresult *res;
	int		 nrows;
	int		 row, column;

	long  wal_records;
	long  wal_fpi;
	long  wal_bytes;
	long  wal_buffers_full;
	long  wal_write;
	long  wal_sync;
	float wal_write_time;
	float wal_sync_time;
	char *stats_reset;
	bool  has_been_reset;

	/* grab the stats (this is the only stats on one line) */
	snprintf(sql, sizeof(sql),
		"SELECT wal_records, wal_fpi, wal_bytes, wal_buffers_full, "
		"wal_write, wal_sync, wal_write_time, wal_sync_time, "
		"stats_reset, stats_reset>'%s' "
		"FROM pg_stat_wal ",
		previous_pgstatwal->stats_reset);

	/* make the call */
	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		warnx("pgstat: query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		errx(1, "pgstat: query was: %s", sql);
	}

	/* get the number of rows returned */
	nrows = PQntuples(res);

	/* for each row, dump the information */
	/* this is stupid, a simple if would do the trick, but it will help for other cases */
	for (row = 0; row < nrows; row++)
	{
		column = 0;

		/* getting new values */
		wal_records = atol(PQgetvalue(res, row, column++));
		wal_fpi = atol(PQgetvalue(res, row, column++));
		wal_bytes = atol(PQgetvalue(res, row, column++));
		wal_buffers_full = atol(PQgetvalue(res, row, column++));
		wal_write = atol(PQgetvalue(res, row, column++));
		wal_sync = atol(PQgetvalue(res, row, column++));
		wal_write_time = atof(PQgetvalue(res, row, column++));
		wal_sync_time = atof(PQgetvalue(res, row, column++));
		stats_reset = PQgetvalue(res, row, column++);
		has_been_reset = strcmp(PQgetvalue(res, row, column++), "f")
			&& strcmp(previous_pgstatwal->stats_reset, PGSTAT_OLDEST_STAT_RESET);

		if (has_been_reset)
		{
			(void)printf("pg_stat_wal has been reset!\n");
		}

		/* printing the diff...
		 * note that the first line will be the current value, rather than the diff */
		(void)printf(" %6ld %6ld %6ld %6ld %6ld %6ld %6.2f %6.2f\n",
			wal_records - previous_pgstatwal->wal_records,
			wal_fpi - previous_pgstatwal->wal_fpi,
			wal_bytes - previous_pgstatwal->wal_bytes,
			wal_buffers_full - previous_pgstatwal->wal_buffers_full,
			wal_write - previous_pgstatwal->wal_write,
			wal_sync - previous_pgstatwal->wal_sync,
			wal_write_time - previous_pgstatwal->wal_write_time,
			wal_sync_time - previous_pgstatwal->wal_sync_time
			);

		/* setting the new old value */
		previous_pgstatwal->wal_records = wal_records;
		previous_pgstatwal->wal_fpi = wal_fpi;
		previous_pgstatwal->wal_bytes = wal_bytes;
		previous_pgstatwal->wal_buffers_full = wal_buffers_full;
		previous_pgstatwal->wal_write = wal_write;
		previous_pgstatwal->wal_sync = wal_sync;
		previous_pgstatwal->wal_write_time = wal_write_time;
		previous_pgstatwal->wal_sync_time = wal_sync_time;
		previous_pgstatwal->stats_reset = stats_reset;
	}

	/* cleanup */
	PQclear(res);
}

/*
 * Dump base backup progress.
*/ void print_pgstatprogressbasebackup() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row; snprintf(sql, sizeof(sql), "SELECT pid," " phase," " pg_size_pretty(backup_streamed)," " pg_size_pretty(backup_total)," " CASE WHEN backup_total>0" " THEN trunc(backup_streamed::numeric*100/backup_total,2)::text" " ELSE 'N/A' END," " CASE WHEN tablespaces_total>0" " THEN trunc(tablespaces_streamed::numeric*100/tablespaces_total,2)::text" " ELSE 'N/A' END," " (now()-query_start)::time(0) " "FROM pg_stat_progress_basebackup " "JOIN pg_stat_activity USING (pid) " "ORDER BY pid"); res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ for (row = 0; row < nrows; row++) { /* printing the value... */ (void)printf(" %-10s %-28s %-10s %-10s %6s %6s %s\n", PQgetvalue(res, row, 0), PQgetvalue(res, row, 1), PQgetvalue(res, row, 2), PQgetvalue(res, row, 3), PQgetvalue(res, row, 4), PQgetvalue(res, row, 5), PQgetvalue(res, row, 6) ); }; /* cleanup */ PQclear(res); } /* * Dump analyze progress. 
*/ void print_pgstatprogressanalyze() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row; snprintf(sql, sizeof(sql), "SELECT s.datname, relname," " pg_size_pretty(pg_table_size(relid))," " phase," " CASE WHEN sample_blks_total>0" " THEN trunc(sample_blks_scanned::numeric*100/sample_blks_total,2)::text" " ELSE 'N/A' END," " CASE WHEN ext_stats_total>0" " THEN trunc(ext_stats_computed::numeric*100/ext_stats_total,2)::text" " ELSE 'N/A' END," " CASE WHEN child_tables_total>0" " THEN trunc(child_tables_done::numeric*100/child_tables_total,2)::text" " ELSE 'N/A' END," " (now()-query_start)::time(0) " "FROM pg_stat_progress_analyze s " "JOIN pg_stat_activity USING (pid) " "LEFT JOIN pg_class c ON c.oid=s.relid " "ORDER BY pid"); res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { /* printing the value... */ (void)printf(" %-16s %-20s %10s %-24s %6s %6s %6s %s\n", PQgetvalue(res, row, 0), PQgetvalue(res, row, 1), PQgetvalue(res, row, 2), PQgetvalue(res, row, 3), PQgetvalue(res, row, 4), PQgetvalue(res, row, 5), PQgetvalue(res, row, 6), PQgetvalue(res, row, 7) ); }; /* cleanup */ PQclear(res); } /* * Dump cluster progress. 
*/ void print_pgstatprogresscluster() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row; snprintf(sql, sizeof(sql), "SELECT s.datname, t.relname, i.relname," " phase, heap_tuples_scanned, heap_tuples_written," " CASE WHEN heap_blks_total=0 THEN 'N/A' ELSE trunc(heap_blks_scanned::numeric*100/heap_blks_total,2)::text END," " index_rebuild_count," " (now()-query_start)::time(0) " "FROM pg_stat_progress_cluster s " "JOIN pg_stat_activity USING (pid) " "LEFT JOIN pg_class t ON t.oid=s.relid " "LEFT JOIN pg_class i ON i.oid=s.cluster_index_relid " "ORDER BY pid"); res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { /* printing the value... */ (void)printf(" %-16s %-20s %-20s %-46s %12ld %12ld %5s %10ld %s\n", PQgetvalue(res, row, 0), PQgetvalue(res, row, 1), PQgetvalue(res, row, 2), PQgetvalue(res, row, 3), atol(PQgetvalue(res, row, 4)), atol(PQgetvalue(res, row, 5)), PQgetvalue(res, row, 6), atol(PQgetvalue(res, row, 7)), PQgetvalue(res, row, 8) ); }; /* cleanup */ PQclear(res); } /* * Dump copy progress. 
*/ void print_pgstatprogresscopy() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row; snprintf(sql, sizeof(sql), "SELECT pc.datname, t.relname," " command, type," " bytes_processed, bytes_total, tuples_processed, tuples_excluded," " (now()-query_start)::time(0) " "FROM pg_stat_progress_copy pc " "JOIN pg_stat_activity USING (pid) " "LEFT JOIN pg_class t ON t.oid=pc.relid " "ORDER BY pid"); res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { /* printing the value... */ (void)printf(" %-16s %-20s %-23s %-20s %10ld %10ld %10ld %10ld %s\n", PQgetvalue(res, row, 0), PQgetvalue(res, row, 1), PQgetvalue(res, row, 2), PQgetvalue(res, row, 3), atol(PQgetvalue(res, row, 4)), atol(PQgetvalue(res, row, 5)), atol(PQgetvalue(res, row, 6)), atol(PQgetvalue(res, row, 7)), PQgetvalue(res, row, 8) ); }; /* cleanup */ PQclear(res); } /* * Dump index creation progress. 
*/ void print_pgstatprogresscreateindex() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row; snprintf(sql, sizeof(sql), "SELECT s.datname, t.relname, i.relname," " phase," " CASE WHEN lockers_total=0 THEN 'N/A' ELSE trunc(lockers_done::numeric*100/lockers_total,2)::text END," " CASE WHEN blocks_total=0 THEN 'N/A' ELSE trunc(blocks_done::numeric*100/blocks_total,2)::text END," " CASE WHEN tuples_total=0 THEN 'N/A' ELSE trunc(tuples_done::numeric*100/tuples_total,2)::text END," " CASE WHEN partitions_total=0 THEN 'N/A' ELSE trunc(partitions_done::numeric*100/partitions_total,2)::text END, " " (now()-query_start)::time(0) " "FROM pg_stat_progress_create_index s " "JOIN pg_stat_activity USING (pid) " "LEFT JOIN pg_class t ON t.oid=s.relid " "LEFT JOIN pg_class i ON i.oid=s.index_relid " "ORDER BY pid"); res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { /* printing the value... */ (void)printf(" %-16s %-20s %-20s %-46s %5s %5s %5s %5s %s\n", PQgetvalue(res, row, 0), PQgetvalue(res, row, 1), PQgetvalue(res, row, 2), PQgetvalue(res, row, 3), PQgetvalue(res, row, 4), PQgetvalue(res, row, 5), PQgetvalue(res, row, 6), PQgetvalue(res, row, 7), PQgetvalue(res, row, 8) ); }; /* cleanup */ PQclear(res); } /* * Dump vacuum progress. 
*/ void print_pgstatprogressvacuum() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row; snprintf(sql, sizeof(sql), "SELECT s.datname, relname," " pg_size_pretty(pg_table_size(relid))," " phase," " CASE WHEN heap_blks_total=0 THEN 'N/A' ELSE trunc(heap_blks_scanned::numeric*100/heap_blks_total,2)::text END," " CASE WHEN heap_blks_total=0 THEN 'N/A' ELSE trunc(heap_blks_vacuumed::numeric*100/heap_blks_total,2)::text END," " index_vacuum_count," " CASE WHEN max_dead_tuples=0 THEN 'N/A' ELSE trunc(num_dead_tuples::numeric*100/max_dead_tuples,2)::text END," " (now()-query_start)::time(0) " "FROM pg_stat_progress_vacuum s " "JOIN pg_stat_activity USING (pid) " "LEFT JOIN pg_class c ON c.oid=s.relid " "ORDER BY pid"); res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { /* printing the value... */ (void)printf(" %-16s %-20s %10s %-24s %5s %5s %5ld %5s %s\n", PQgetvalue(res, row, 0), PQgetvalue(res, row, 1), PQgetvalue(res, row, 2), PQgetvalue(res, row, 3), PQgetvalue(res, row, 4), PQgetvalue(res, row, 5), atol(PQgetvalue(res, row, 6)), PQgetvalue(res, row, 7), PQgetvalue(res, row, 8) ); }; /* cleanup */ PQclear(res); } /* * Dump all buffercache stats. */ void print_buffercache() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row, column; long usedblocks = 0; long usedblocks_pct = 0; long dirtyblocks = 0; long dirtyblocks_pct = 0; snprintf(sql, sizeof(sql), "SELECT count(*) FILTER (WHERE relfilenode IS NOT NULL), " "100. * count(*) FILTER (WHERE relfilenode IS NOT NULL) / count(*), " "count(*) FILTER (WHERE isdirty), " "100. 
* count(*) FILTER (WHERE isdirty) / count(*) " "FROM %s.pg_buffercache ", opts->namespace); res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { column = 0; usedblocks = atol(PQgetvalue(res, row, column++)); usedblocks_pct = atol(PQgetvalue(res, row, column++)); dirtyblocks = atol(PQgetvalue(res, row, column++)); dirtyblocks_pct = atol(PQgetvalue(res, row, column++)); /* printing the actual values for once */ (void)printf(" %4ld %4ld %4ld %4ld\n", usedblocks, usedblocks_pct, dirtyblocks, dirtyblocks_pct); } /* cleanup */ PQclear(res); } /* * Dump all xlog writes stats. */ void print_xlogstats() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; char *xlogfilename; char *currentlocation; char *prettylocation; long locationdiff; char h_locationdiff[PGSTAT_DEFAULT_STRING_SIZE]; if (backend_minimum_version(10, 0)) { snprintf(sql, sizeof(sql), "SELECT " " pg_walfile_name(pg_current_wal_lsn()), " " pg_current_wal_lsn(), " " pg_wal_lsn_diff(pg_current_wal_lsn(), '0/0'), " " pg_size_pretty(pg_wal_lsn_diff(pg_current_wal_lsn(), '%s'))", previous_xlogstats->location); } else { snprintf(sql, sizeof(sql), "SELECT " " pg_xlogfile_name(pg_current_xlog_location()), " " pg_current_xlog_location(), " " pg_xlog_location_diff(pg_current_xlog_location(), '0/0'), " " pg_size_pretty(pg_xlog_location_diff(pg_current_xlog_location(), '%s'))", previous_xlogstats->location); } res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } 
xlogfilename = pg_strdup(PQgetvalue(res, 0, 0)); currentlocation = pg_strdup(PQgetvalue(res, 0, 1)); locationdiff = atol(PQgetvalue(res, 0, 2)); prettylocation = pg_strdup(PQgetvalue(res, 0, 3)); /* get the human-readable diff if asked */ if (opts->human_readable) { snprintf(h_locationdiff, sizeof(h_locationdiff), "%s", prettylocation); } else { snprintf(h_locationdiff, sizeof(h_locationdiff), "%12ld", locationdiff - previous_xlogstats->locationdiff); } /* printing the actual values for once */ (void)printf(" %s %s %s\n", xlogfilename, currentlocation, h_locationdiff); /* setting the new old value */ previous_xlogstats->location = pg_strdup(currentlocation); previous_xlogstats->locationdiff = locationdiff; /* cleanup */ PQclear(res); } /* * Dump all repslots informations */ void print_repslotsstats() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; char *xlogfilename; char *currentlocation; long locationdiff; char *prettylocation; char h_locationdiff[PGSTAT_DEFAULT_STRING_SIZE]; snprintf(sql, sizeof(sql), "SELECT " " pg_walfile_name(restart_lsn), " " restart_lsn, " " pg_wal_lsn_diff(restart_lsn, '0/0'), " " pg_size_pretty(pg_wal_lsn_diff(restart_lsn, '%s')) " "FROM pg_replication_slots " "WHERE slot_name = '%s'", previous_repslots->restartlsn, opts->filter); res = PQexec(conn, sql); if (!res || PQntuples(res) == 0) { warnx("pgstat: No results, meaning no replicaton slot"); PQclear(res); PQfinish(conn); errx(1, "pgstat: exiting"); } /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } xlogfilename = pg_strdup(PQgetvalue(res, 0, 0)); currentlocation = pg_strdup(PQgetvalue(res, 0, 1)); locationdiff = atol(PQgetvalue(res, 0, 2)); prettylocation = pg_strdup(PQgetvalue(res, 0, 3)); /* get the human-readable diff if asked */ if (opts->human_readable) { snprintf(h_locationdiff, sizeof(h_locationdiff), "%s", 
prettylocation); } else { snprintf(h_locationdiff, sizeof(h_locationdiff), "%12ld", locationdiff - previous_repslots->restartlsndiff); } /* printing the actual values for once */ (void)printf(" %s %s %s\n", xlogfilename, currentlocation, h_locationdiff); /* setting the new old value */ previous_repslots->restartlsn = pg_strdup(currentlocation); previous_repslots->restartlsndiff = locationdiff; /* cleanup */ PQclear(res); } /* * Dump all temporary files stats. */ void print_tempfilestats() { char sql[2*PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; long size = 0; long count = 0; int nrows; int row, column; if (backend_minimum_version(9, 3)) { snprintf(sql, sizeof(sql), "SELECT unnest(regexp_matches(agg.tmpfile, 'pgsql_tmp([0-9]*)')) AS pid, " " SUM((pg_stat_file(agg.dir||'/'||agg.tmpfile)).size), " " count(*) " "FROM " " (SELECT ls.oid, ls.spcname, " " ls.dir||'/'||ls.sub AS dir, CASE gs.i WHEN 1 THEN '' ELSE pglsdir END AS tmpfile " " FROM " " (SELECT sr.oid, sr.spcname, " " 'pg_tblspc/'||sr.oid||'/'||sr.spc_root AS dir, " " pg_ls_dir('pg_tblspc/'||sr.oid||'/'||sr.spc_root) AS sub " " FROM (SELECT spc.oid, spc.spcname, " " pg_ls_dir('pg_tblspc/'||spc.oid) AS spc_root, " " trim(trailing E'\n ' FROM pg_read_file('PG_VERSION')) as v " " FROM (SELECT oid, spcname FROM pg_tablespace WHERE spcname !~ '^pg_') AS spc " " ) sr " " WHERE sr.spc_root ~ ('^PG_'||sr.v) " " UNION ALL " " SELECT 0, 'pg_default', " " 'base' AS dir, " " 'pgsql_tmp' AS sub " " FROM pg_ls_dir('base') AS l " " WHERE l='pgsql_tmp' " " ) AS ls, " " (SELECT generate_series(1,2) AS i) AS gs, " " LATERAL pg_ls_dir(dir||'/'||ls.sub) pglsdir " " WHERE ls.sub = 'pgsql_tmp') agg " "GROUP BY 1"); } else { snprintf(sql, sizeof(sql), "SELECT unnest(regexp_matches(agg.tmpfile, 'pgsql_tmp([0-9]*)')) AS pid, " " SUM((pg_stat_file(agg.dir||'/'||agg.tmpfile)).size), " " count(*) " "FROM " " (SELECT ls.oid, ls.spcname, " " ls.dir||'/'||ls.sub AS dir, CASE gs.i WHEN 1 THEN '' ELSE pg_ls_dir(dir||'/'||ls.sub) END AS tmpfile 
" " FROM " " (SELECT sr.oid, sr.spcname, " " 'pg_tblspc/'||sr.oid||'/'||sr.spc_root AS dir, " " pg_ls_dir('pg_tblspc/'||sr.oid||'/'||sr.spc_root) AS sub " " FROM (SELECT spc.oid, spc.spcname, " " pg_ls_dir('pg_tblspc/'||spc.oid) AS spc_root, " " trim(trailing E'\n ' FROM pg_read_file('PG_VERSION')) as v " " FROM (SELECT oid, spcname FROM pg_tablespace WHERE spcname !~ '^pg_') AS spc " " ) sr " " WHERE sr.spc_root ~ ('^PG_'||sr.v) " " UNION ALL " " SELECT 0, 'pg_default', " " 'base' AS dir, " " 'pgsql_tmp' AS sub " " FROM pg_ls_dir('base') AS l " " WHERE l='pgsql_tmp' " " ) AS ls, " " (SELECT generate_series(1,2) AS i) AS gs " " WHERE ls.sub = 'pgsql_tmp') agg " "GROUP BY 1"); } res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ for (row = 0; row < nrows; row++) { column = 1; /* getting new values */ size += atol(PQgetvalue(res, row, column++)); count += atol(PQgetvalue(res, row, column++)); } /* printing the diff... */ (void)printf(" %9ld %9ld\n", size, count); /* cleanup */ PQclear(res); } /* * Dump all wait event stats. 
*/ void print_pgstatwaitevent() { char sql[2*PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row; snprintf(sql, sizeof(sql), "SELECT " " count(*) FILTER (WHERE wait_event_type='LWLock') AS LWLock, " " count(*) FILTER (WHERE wait_event_type='Lock') AS Lock, " " count(*) FILTER (WHERE wait_event_type='BufferPin') AS BufferPin, " " count(*) FILTER (WHERE wait_event_type='Activity') AS Activity, " " count(*) FILTER (WHERE wait_event_type='Client') AS Client, " " count(*) FILTER (WHERE wait_event_type='Extension') AS Extension, " " count(*) FILTER (WHERE wait_event_type='IPC') AS IPC, " " count(*) FILTER (WHERE wait_event_type='Timeout') AS Timeout, " " count(*) FILTER (WHERE wait_event_type='IO') AS IO, " " count(*) FILTER (WHERE wait_event_type IS NULL) AS Running, " " count(*) AS All " "FROM pg_stat_activity;"); res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ for (row = 0; row < nrows; row++) { /* printing new values */ (void)printf(" %12d %8d %13d %12d %10d %13d %7d %11d %6d %11d %7d\n", atoi(PQgetvalue(res, row, 0)), atoi(PQgetvalue(res, row, 1)), atoi(PQgetvalue(res, row, 2)), atoi(PQgetvalue(res, row, 3)), atoi(PQgetvalue(res, row, 4)), atoi(PQgetvalue(res, row, 5)), atoi(PQgetvalue(res, row, 6)), atoi(PQgetvalue(res, row, 7)), atoi(PQgetvalue(res, row, 8)), atoi(PQgetvalue(res, row, 9)), atoi(PQgetvalue(res, row, 10)) ); } /* cleanup */ PQclear(res); } /* * Dump all pgBouncer pools stats. 
*/ void print_pgbouncerpools() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row, column; long cl_active = 0; long cl_waiting = 0; long sv_active = 0; long sv_idle = 0; long sv_used = 0; long sv_tested = 0; long sv_login = 0; long maxwait = 0; /* * We cannot use a filter now, we need to get all rows. */ snprintf(sql, sizeof(sql), "SHOW pools"); res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { /* we don't use the first two columns */ column = 2; /* getting new values */ cl_active += atol(PQgetvalue(res, row, column++)); cl_waiting += atol(PQgetvalue(res, row, column++)); sv_active += atol(PQgetvalue(res, row, column++)); sv_idle += atol(PQgetvalue(res, row, column++)); sv_used += atol(PQgetvalue(res, row, column++)); sv_tested += atol(PQgetvalue(res, row, column++)); sv_login += atol(PQgetvalue(res, row, column++)); maxwait += atol(PQgetvalue(res, row, column++)); } /* printing the diff... * note that the first line will be the current value, rather than the diff */ (void)printf(" %6ld %6ld %6ld %6ld %6ld %6ld %6ld %6ld\n", cl_active, cl_waiting, sv_active, sv_idle, sv_used, sv_tested, sv_login, maxwait ); /* cleanup */ PQclear(res); } /* * Dump all pgBouncer stats. */ void print_pgbouncerstats() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; int nrows; int row, column; long total_request = 0; long total_received = 0; long total_sent = 0; long total_query_time = 0; /* * We cannot use a filter now, we need to get all rows. 
*/ snprintf(sql, sizeof(sql), "SHOW stats"); res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the number of fields */ nrows = PQntuples(res); /* for each row, dump the information */ /* this is stupid, a simple if would do the trick, but it will help for other cases */ for (row = 0; row < nrows; row++) { /* we don't use the first column */ column = 1; /* getting new values */ total_request += atol(PQgetvalue(res, row, column++)); total_received += atol(PQgetvalue(res, row, column++)); total_sent += atol(PQgetvalue(res, row, column++)); total_query_time += atol(PQgetvalue(res, row, column++)); } /* printing the diff... * note that the first line will be the current value, rather than the diff */ (void)printf(" %6ld %6ld %6ld %6ld\n", total_request - previous_pgbouncerstats->total_request, total_received - previous_pgbouncerstats->total_received, total_sent - previous_pgbouncerstats->total_sent, total_query_time - previous_pgbouncerstats->total_query_time ); /* setting the new old value */ previous_pgbouncerstats->total_request = total_request; previous_pgbouncerstats->total_received = total_received; previous_pgbouncerstats->total_sent = total_sent; previous_pgbouncerstats->total_query_time = total_query_time; /* cleanup */ PQclear(res); } /* * Fetch PostgreSQL major and minor numbers */ void fetch_version() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; /* get the cluster version */ snprintf(sql, sizeof(sql), "SELECT version()"); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the only row, and parse it to get major and minor numbers */ sscanf(PQgetvalue(res, 0, 0), "%*s 
%d.%d", &(opts->major), &(opts->minor)); /* print version */ if (opts->verbose) printf("Detected release: %d.%d\n", opts->major, opts->minor); /* cleanup */ PQclear(res); } /* * Fetch setting value */ char *fetch_setting(char *name) { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; char *setting; /* get the cluster version */ snprintf(sql, sizeof(sql), "SELECT setting FROM pg_settings WHERE name='%s'", name); /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } /* get the only row as the setting value */ setting = pg_strdup(PQgetvalue(res, 0, 0)); /* print version */ if (opts->verbose) printf("%s is set to %s\n", name, setting); /* cleanup */ PQclear(res); return setting; } /* * Fetch pg_buffercache namespace */ void fetch_pgbuffercache_namespace() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; /* get the pg_stat_statement installation schema */ if (backend_minimum_version(9, 1)) { snprintf(sql, sizeof(sql), "SELECT nspname FROM pg_extension e " "JOIN pg_namespace n ON e.extnamespace=n.oid " "WHERE extname='pg_buffercache'"); } else { snprintf(sql, sizeof(sql), "SELECT nspname FROM pg_proc p " "JOIN pg_namespace n ON p.pronamespace=n.oid " "WHERE proname='pg_buffercache'"); } /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } if (PQntuples(res) > 0) { /* get the only row, and parse it to get major and minor numbers */ opts->namespace = pg_strdup(PQgetvalue(res, 0, 0)); /* print version */ if (opts->verbose) printf("pg_buffercache namespace: %s\n", opts->namespace); } /* cleanup */ PQclear(res); } /* * Fetch pg_stat_statement namespace */ void 
fetch_pgstatstatements_namespace() { char sql[PGSTAT_DEFAULT_STRING_SIZE]; PGresult *res; /* get the pg_stat_statement installation schema */ if (backend_minimum_version(9, 1)) { snprintf(sql, sizeof(sql), "SELECT nspname FROM pg_extension e " "JOIN pg_namespace n ON e.extnamespace=n.oid " "WHERE extname='pg_stat_statements'"); } else { snprintf(sql, sizeof(sql), "SELECT nspname FROM pg_proc p " "JOIN pg_namespace n ON p.pronamespace=n.oid " "WHERE proname='pg_stat_statements'"); } /* make the call */ res = PQexec(conn, sql); /* check and deal with errors */ if (!res || PQresultStatus(res) > 2) { warnx("pgstat: query failed: %s", PQerrorMessage(conn)); PQclear(res); PQfinish(conn); errx(1, "pgstat: query was: %s", sql); } if (PQntuples(res) > 0) { /* get the only row, and parse it to get major and minor numbers */ opts->namespace = pg_strdup(PQgetvalue(res, 0, 0)); /* print version */ if (opts->verbose) printf("pg_stat_statements namespace: %s\n", opts->namespace); } /* cleanup */ PQclear(res); } /* * Compare given major and minor numbers to the one of the connected server */ bool backend_minimum_version(int major, int minor) { return opts->major > major || (opts->major == major && opts->minor >= minor); } /* * Print the right header according to the stats mode */ void print_header(void) { switch(opts->stat) { case NONE: /* That shouldn't happen */ break; case ARCHIVER: (void)printf("---- WAL counts ----\n"); (void)printf(" archived failed \n"); break; case BGWRITER: (void)printf("------------ checkpoints ------------- ------------- buffers ------------- ---------- misc ----------\n"); (void)printf(" timed requested write_time sync_time checkpoint clean backend alloc maxwritten backend_fsync\n"); break; case CONNECTION: (void)printf(" - total - active - lockwaiting - idle in transaction - idle -\n"); break; case DATABASE: (void)printf("- backends - ------ xacts ------ ---------------- blocks ---------------- -------------- tuples -------------- ------ temp ------ 
---------------------------- session ---------------------------- ------------ misc -------------\n"); (void)printf(" commit rollback read hit read_time write_time ret fet ins upd del files bytes all_time active_time iit_time numbers abandoned fatal killed conflicts deadlocks checksums\n"); break; case TABLE: (void)printf("-- sequential -- ------ index ------ -------------------- tuples ----------------------------- -------------- maintenance --------------\n"); (void)printf(" scan tuples scan tuples ins upd del hotupd live dead analyze ins_vac vacuum autovacuum analyze autoanalyze\n"); break; case TABLEIO: (void)printf("--- heap table --- --- toast table --- --- heap indexes --- --- toast indexes ---\n"); (void)printf(" read hit read hit read hit read hit \n"); break; case INDEX: (void)printf("-- scan -- ----- tuples -----\n"); (void)printf(" read fetch\n"); break; case FUNCTION: (void)printf("-- count -- ------ time ------\n"); (void)printf(" total self\n"); break; case STATEMENT: (void)printf("----- plan ----- --------- exec ---------- ----------- shared ----------- ----------- local ----------- ----- temp ----- -------- time -------- -------------- wal --------------\n"); (void)printf(" plans time calls time rows hit read dirty written hit read dirty written read written read written wal_records wal_fpi wal_bytes\n"); break; case SLRU: (void)printf(" zeroed hit read written exists flushes truncates\n"); break; case WAL: (void)printf(" records FPI bytes buffers_full write sync write_time sync_time\n"); break; case BUFFERCACHE: (void)printf("------- used ------- ------ dirty ------\n"); (void)printf(" total percent total percent\n"); break; case XLOG: case REPSLOTS: (void)printf("-------- filename -------- -- location -- ---- bytes ----\n"); break; case TEMPFILE: (void)printf("--- size --- --- count ---\n"); break; case WAITEVENT: (void)printf("--- LWLock --- Lock --- BufferPin --- Activity --- Client --- Extension --- IPC --- Timeout --- IO --- Running --- All 
---\n"); break; case PROGRESS_ANALYZE: (void)printf("--------------------- object --------------------- ---------- phase ---------- ---------------- stats --------------- -- time elapsed --\n"); (void)printf(" database relation size %%sample blocks %%ext stats %%child tables\n"); break; case PROGRESS_BASEBACKUP: (void)printf("--- pid --- ---------- phase ---------- ---------------------- stats -------------------- -- time elapsed --\n"); (void)printf(" Sent size - Total size - %%Sent - %%Tablespaces\n"); break; case PROGRESS_CLUSTER: (void)printf("--------------------------- object -------------------------- -------------------- phase -------------------- ------------------- stats ------------------- -- time elapsed --\n"); (void)printf(" database table index tuples scanned tuples written %%blocks index rebuilt\n"); break; case PROGRESS_COPY: (void)printf("----------------- object ---------------- -------------------- phase -------------------- --------- bytes --------- ------- tuples -------- -- time elapsed --\n"); (void)printf(" database table command type processed total processed excluded\n"); break; case PROGRESS_CREATEINDEX: (void)printf("--------------------------- object -------------------------- -------------------- phase -------------------- ------------------- stats ------------------- -- time elapsed --\n"); (void)printf(" database table index %%lockers %%blocks %%tuples %%partitions\n"); break; case PROGRESS_VACUUM: (void)printf("--------------------- object --------------------- ---------- phase ---------- ---------------- stats --------------- -- time elapsed --\n"); (void)printf(" database relation size %%scan %%vacuum #index %%dead tuple\n"); break; case PBPOOLS: (void)printf("---- client ----- ---------------- server ---------------- -- misc --\n"); (void)printf(" active waiting active idle used tested login maxwait\n"); break; case PBSTATS: (void)printf("---------------- total -----------------\n"); (void)printf(" request received sent query 
time\n"); break; } if (wresized != 0) doresize(); if (opts->dontredisplayheader) hdrcnt = 0; else hdrcnt = winlines; } /* * Call the right function according to the stats mode */ void print_line(void) { switch(opts->stat) { case NONE: /* That shouldn't happen */ break; case ARCHIVER: print_pgstatarchiver(); break; case BGWRITER: print_pgstatbgwriter(); break; case CONNECTION: print_pgstatconnection(); break; case DATABASE: print_pgstatdatabase(); break; case TABLE: print_pgstattable(); break; case TABLEIO: print_pgstattableio(); break; case INDEX: print_pgstatindex(); break; case FUNCTION: print_pgstatfunction(); break; case STATEMENT: print_pgstatstatement(); break; case SLRU: print_pgstatslru(); break; case WAL: print_pgstatwal(); break; case BUFFERCACHE: print_buffercache(); break; case XLOG: print_xlogstats(); break; case REPSLOTS: print_repslotsstats(); break; case PROGRESS_ANALYZE: print_pgstatprogressanalyze(); break; case PROGRESS_BASEBACKUP: print_pgstatprogressbasebackup(); break; case PROGRESS_CLUSTER: print_pgstatprogresscluster(); break; case PROGRESS_COPY: print_pgstatprogresscopy(); break; case PROGRESS_CREATEINDEX: print_pgstatprogresscreateindex(); break; case PROGRESS_VACUUM: print_pgstatprogressvacuum(); break; case TEMPFILE: print_tempfilestats(); break; case WAITEVENT: print_pgstatwaitevent(); break; case PBPOOLS: print_pgbouncerpools(); break; case PBSTATS: print_pgbouncerstats(); break; } } /* * Allocate and initialize the right statistics struct according to the stats mode */ void allocate_struct(void) { switch (opts->stat) { case NONE: /* That shouldn't happen */ break; case ARCHIVER: previous_pgstatarchiver = (struct pgstatarchiver *) pg_malloc(sizeof(struct pgstatarchiver)); previous_pgstatarchiver->archived_count = 0; previous_pgstatarchiver->failed_count = 0; previous_pgstatarchiver->stats_reset = PGSTAT_OLDEST_STAT_RESET; break; case BGWRITER: previous_pgstatbgwriter = (struct pgstatbgwriter *) pg_malloc(sizeof(struct pgstatbgwriter)); 
previous_pgstatbgwriter->checkpoints_timed = 0;
			previous_pgstatbgwriter->checkpoints_req = 0;
			previous_pgstatbgwriter->checkpoint_write_time = 0;
			previous_pgstatbgwriter->checkpoint_sync_time = 0;
			previous_pgstatbgwriter->buffers_checkpoint = 0;
			previous_pgstatbgwriter->buffers_clean = 0;
			previous_pgstatbgwriter->maxwritten_clean = 0;
			previous_pgstatbgwriter->buffers_backend = 0;
			previous_pgstatbgwriter->buffers_backend_fsync = 0;
			previous_pgstatbgwriter->buffers_alloc = 0;
			previous_pgstatbgwriter->stats_reset = PGSTAT_OLDEST_STAT_RESET;
			break;
		case CONNECTION:
			// nothing to do
			break;
		case DATABASE:
			/* pg_stat_database counters, zeroed so the first sample diff is absolute */
			previous_pgstatdatabase = (struct pgstatdatabase *) pg_malloc(sizeof(struct pgstatdatabase));
			previous_pgstatdatabase->xact_commit = 0;
			previous_pgstatdatabase->xact_rollback = 0;
			previous_pgstatdatabase->blks_read = 0;
			previous_pgstatdatabase->blks_hit = 0;
			previous_pgstatdatabase->tup_returned = 0;
			previous_pgstatdatabase->tup_fetched = 0;
			previous_pgstatdatabase->tup_inserted = 0;
			previous_pgstatdatabase->tup_updated = 0;
			previous_pgstatdatabase->tup_deleted = 0;
			previous_pgstatdatabase->conflicts = 0;
			previous_pgstatdatabase->temp_files = 0;
			previous_pgstatdatabase->temp_bytes = 0;
			previous_pgstatdatabase->deadlocks = 0;
			previous_pgstatdatabase->checksum_failures = 0;
			previous_pgstatdatabase->blk_read_time = 0;
			previous_pgstatdatabase->blk_write_time = 0;
			previous_pgstatdatabase->session_time = 0;
			previous_pgstatdatabase->active_time = 0;
			previous_pgstatdatabase->idle_in_transaction_time = 0;
			previous_pgstatdatabase->sessions = 0;
			previous_pgstatdatabase->sessions_abandoned = 0;
			previous_pgstatdatabase->sessions_fatal = 0;
			previous_pgstatdatabase->sessions_killed = 0;
			previous_pgstatdatabase->stats_reset = PGSTAT_OLDEST_STAT_RESET;
			break;
		case TABLE:
			/* pg_stat_user_tables counters */
			previous_pgstattable = (struct pgstattable *) pg_malloc(sizeof(struct pgstattable));
			previous_pgstattable->seq_scan = 0;
			previous_pgstattable->seq_tup_read = 0;
			previous_pgstattable->idx_scan = 0;
			previous_pgstattable->idx_tup_fetch = 0;
			previous_pgstattable->n_tup_ins = 0;
			previous_pgstattable->n_tup_upd = 0;
			previous_pgstattable->n_tup_del = 0;
			previous_pgstattable->n_tup_hot_upd = 0;
			previous_pgstattable->n_live_tup = 0;
			previous_pgstattable->n_dead_tup = 0;
			previous_pgstattable->n_mod_since_analyze = 0;
			previous_pgstattable->n_ins_since_vacuum = 0;
			previous_pgstattable->vacuum_count = 0;
			previous_pgstattable->autovacuum_count = 0;
			previous_pgstattable->analyze_count = 0;
			previous_pgstattable->autoanalyze_count = 0;
			break;
		case TABLEIO:
			/* pg_statio_user_tables counters */
			previous_pgstattableio = (struct pgstattableio *) pg_malloc(sizeof(struct pgstattableio));
			previous_pgstattableio->heap_blks_read = 0;
			previous_pgstattableio->heap_blks_hit = 0;
			previous_pgstattableio->idx_blks_read = 0;
			previous_pgstattableio->idx_blks_hit = 0;
			previous_pgstattableio->toast_blks_read = 0;
			previous_pgstattableio->toast_blks_hit = 0;
			previous_pgstattableio->tidx_blks_read = 0;
			previous_pgstattableio->tidx_blks_hit = 0;
			break;
		case INDEX:
			/* pg_stat_user_indexes counters */
			previous_pgstatindex = (struct pgstatindex *) pg_malloc(sizeof(struct pgstatindex));
			previous_pgstatindex->idx_scan = 0;
			previous_pgstatindex->idx_tup_read = 0;
			previous_pgstatindex->idx_tup_fetch = 0;
			break;
		case FUNCTION:
			/* pg_stat_user_functions counters */
			previous_pgstatfunction = (struct pgstatfunction *) pg_malloc(sizeof(struct pgstatfunction));
			previous_pgstatfunction->calls = 0;
			previous_pgstatfunction->total_time = 0;
			previous_pgstatfunction->self_time = 0;
			break;
		case STATEMENT:
			/* pg_stat_statements counters */
			previous_pgstatstatement = (struct pgstatstatement *) pg_malloc(sizeof(struct pgstatstatement));
			previous_pgstatstatement->plans = 0;
			previous_pgstatstatement->total_plan_time = 0;
			previous_pgstatstatement->calls = 0;
			previous_pgstatstatement->total_exec_time = 0;
			previous_pgstatstatement->rows = 0;
			previous_pgstatstatement->shared_blks_hit = 0;
			previous_pgstatstatement->shared_blks_read = 0;
			previous_pgstatstatement->shared_blks_dirtied = 0;
			previous_pgstatstatement->shared_blks_written = 0;
			previous_pgstatstatement->local_blks_hit = 0;
			previous_pgstatstatement->local_blks_read = 0;
			previous_pgstatstatement->local_blks_dirtied = 0;
			previous_pgstatstatement->local_blks_written = 0;
			previous_pgstatstatement->temp_blks_read = 0;
			previous_pgstatstatement->temp_blks_written = 0;
			previous_pgstatstatement->blk_read_time = 0;
			previous_pgstatstatement->blk_write_time = 0;
			previous_pgstatstatement->wal_records = 0;
			previous_pgstatstatement->wal_fpi = 0;
			previous_pgstatstatement->wal_bytes = 0;
			break;
		case SLRU:
			/* pg_stat_slru counters */
			previous_pgstatslru = (struct pgstatslru *) pg_malloc(sizeof(struct pgstatslru));
			previous_pgstatslru->blks_zeroed = 0;
			previous_pgstatslru->blks_hit = 0;
			previous_pgstatslru->blks_read = 0;
			previous_pgstatslru->blks_written = 0;
			previous_pgstatslru->blks_exists = 0;
			previous_pgstatslru->flushes = 0;
			previous_pgstatslru->truncates = 0;
			previous_pgstatslru->stats_reset = PGSTAT_OLDEST_STAT_RESET;
			break;
		case WAL:
			/* pg_stat_wal counters */
			previous_pgstatwal = (struct pgstatwal *) pg_malloc(sizeof(struct pgstatwal));
			previous_pgstatwal->wal_records = 0;
			previous_pgstatwal->wal_fpi = 0;
			previous_pgstatwal->wal_bytes = 0;
			previous_pgstatwal->wal_buffers_full = 0;
			previous_pgstatwal->wal_write = 0;
			previous_pgstatwal->wal_sync = 0;
			previous_pgstatwal->wal_write_time = 0;
			previous_pgstatwal->wal_sync_time = 0;
			previous_pgstatwal->stats_reset = PGSTAT_OLDEST_STAT_RESET;
			break;
		case XLOG:
			/* WAL location; "0/0" is the zero LSN so the first diff is absolute */
			previous_xlogstats = (struct xlogstats *) pg_malloc(sizeof(struct xlogstats));
			previous_xlogstats->location = pg_strdup("0/0");
			previous_xlogstats->locationdiff = 0;
			break;
		case REPSLOTS:
			/* replication slot restart LSN */
			previous_repslots = (struct repslots *) pg_malloc(sizeof(struct repslots));
			previous_repslots->restartlsn = pg_strdup("0/0");
			previous_repslots->restartlsndiff = 0;
			break;
		case BUFFERCACHE:
		case TEMPFILE:
		case WAITEVENT:
		case PROGRESS_ANALYZE:
		case PROGRESS_BASEBACKUP:
		case PROGRESS_CLUSTER:
		case PROGRESS_COPY:
		case PROGRESS_CREATEINDEX:
		case PROGRESS_VACUUM:
		case PBPOOLS:
			// no initialization worth doing...
break; case PBSTATS: previous_pgbouncerstats = (struct pgbouncerstats *) pg_malloc(sizeof(struct pgbouncerstats)); previous_pgbouncerstats->total_request = 0; previous_pgbouncerstats->total_received = 0; previous_pgbouncerstats->total_sent = 0; previous_pgbouncerstats->total_query_time = 0; break; } } /* * Force a header to be prepended to the next output. */ static void needhdr(int dummy) { hdrcnt = 1; } /* * When the terminal is resized, force an update of the maximum number of rows * printed between each header repetition. Then force a new header to be * prepended to the next output. */ void needresize(int signo) { wresized = 1; } /* * Update the global `winlines' count of terminal rows. */ void doresize(void) { int status; struct winsize w; for (;;) { status = ioctl(fileno(stdout), TIOCGWINSZ, &w); if (status == -1 && errno == EINTR) continue; else if (status == -1) errx(1, "ioctl"); if (w.ws_row > 3) winlines = w.ws_row - 3; else winlines = PGSTAT_DEFAULT_LINES; break; } /* * Inhibit doresize() calls until we are rescheduled by SIGWINCH. */ wresized = 0; hdrcnt = 1; } /* * Close the PostgreSQL connection, and quit */ static void quit_properly(SIGNAL_ARGS) { PQfinish(conn); exit(1); } /* * Main function */ int main(int argc, char **argv) { /* * If the user stops the program (control-Z) and then resumes it, * print out the header again. */ pqsignal(SIGCONT, needhdr); pqsignal(SIGINT, quit_properly); /* * If our standard output is a tty, then install a SIGWINCH handler * and set wresized so that our first iteration through the main * pgstat loop will peek at the terminal's current rows to find out * how many lines can fit in a screenful of output. 
	 */
	if (isatty(fileno(stdout)) != 0)
	{
		wresized = 1;
		(void)signal(SIGWINCH, needresize);
	}
	else
	{
		wresized = 0;
		winlines = PGSTAT_DEFAULT_LINES;
	}

	/* Allocate the options struct */
	opts = (struct options *) pg_malloc(sizeof(struct options));

	/* Parse the options */
	get_opts(argc, argv);

	/* Connect to the database */
	conn = sql_conn();

	/* Get PostgreSQL version
	 * (if we are not connected to the pseudo pgBouncer database) */
	if (opts->stat != PBPOOLS && opts->stat != PBSTATS)
	{
		fetch_version();
	}

	/* Without the -s option, defaults to the bgwriter statistics */
	if (opts->stat == NONE)
	{
		opts->stat = BGWRITER;
	}

	/* Check if the release number matches the statistics */
	if ((opts->stat == CONNECTION || opts->stat == XLOG) && !backend_minimum_version(9, 2))
	{
		PQfinish(conn);
		errx(1, "You need at least v9.2 for this statistic.");
	}
	if (opts->stat == ARCHIVER && !backend_minimum_version(9, 4))
	{
		PQfinish(conn);
		errx(1, "You need at least v9.4 for this statistic.");
	}
	if ((opts->stat == PROGRESS_VACUUM || opts->stat == WAITEVENT) && !backend_minimum_version(9, 6))
	{
		PQfinish(conn);
		errx(1, "You need at least v9.6 for this statistic.");
	}
	if ((opts->stat == PROGRESS_CREATEINDEX || opts->stat == PROGRESS_CLUSTER) && !backend_minimum_version(12, 0))
	{
		PQfinish(conn);
		errx(1, "You need at least v12 for this statistic.");
	}
	if ((opts->stat == PROGRESS_ANALYZE || opts->stat == PROGRESS_BASEBACKUP|| opts->stat == SLRU) && !backend_minimum_version(13, 0))
	{
		PQfinish(conn);
		errx(1, "You need at least v13 for this statistic.");
	}
	if ((opts->stat == WAL || opts->stat == PROGRESS_COPY) && !backend_minimum_version(14, 0))
	{
		PQfinish(conn);
		errx(1, "You need at least v14 for this statistic.");
	}

	/* Check if the configuration matches the statistics */
	if (opts->stat == FUNCTION)
	{
		if (strcmp(fetch_setting("track_functions"), "none") == 0)
		{
			PQfinish(conn);
			errx(1, "track_functions is set to \"none\".");
		}
	}
	if (opts->stat == STATEMENT)
	{
		/* pg_stat_statements is an extension: locate its schema first */
		fetch_pgstatstatements_namespace();
		if (!opts->namespace)
		{
			PQfinish(conn);
			errx(1, "Cannot find the pg_stat_statements extension.");
		}
	}
	if (opts->stat == BUFFERCACHE)
	{
		fetch_pgbuffercache_namespace();
		if (!opts->namespace)
		{
			PQfinish(conn);
			errx(1, "Cannot find the pg_buffercache extension.");
		}
	}

	/* Filter required for replication slots */
	if (opts->stat == REPSLOTS && !opts->filter)
	{
		PQfinish(conn);
		errx(1, "You need to specify a replication slot with -f for this statistic.");
	}

	/* Allocate and initialize statistics struct */
	allocate_struct();

	/* Grab cluster stats info */
	for (hdrcnt = 1;;)
	{
		if (!--hdrcnt)
			print_header();

		print_line();

		(void)fflush(stdout);

		/*
		 * NOTE(review): when opts->count starts at 0 (presumably the "run
		 * forever" default — confirm in get_opts), the pre-decrement makes it
		 * negative and the loop only stops at integer wraparound; looks
		 * intentional (classic vmstat idiom) but worth documenting upstream.
		 * NOTE(review): POSIX leaves usleep() unspecified for arguments of
		 * 1000000 or more, so intervals >= 1s rely on platform leniency.
		 */
		if (--opts->count == 0)
			break;

		(void)usleep(opts->interval * 1000000);
	}

	PQfinish(conn);
	return 0;
}
pgstats-REL1_2_0/pgwaitevent.c000066400000000000000000000453151406310430400164070ustar00rootroot00000000000000/*
 * pgwaitevent, a PostgreSQL app to gather statistical informations
 * on wait events of PostgreSQL PID backend.
 *
 * This software is released under the PostgreSQL Licence.
 *
 * Guillaume Lelarge, guillaume@lelarge.info, 2019-2021.
* * pgstats/pgwaitevent.c */ /* * Headers */ #include "postgres_fe.h" #include "common/string.h" #include #include #include #include #include #ifdef HAVE_GETOPT_H #include #endif #include "libpq-fe.h" #include "libpq/pqsignal.h" /* * Defines */ #define PGWAITEVENT_VERSION "1.2.0" #define PGWAITEVENT_DEFAULT_LINES 20 #define PGWAITEVENT_DEFAULT_STRING_SIZE 2048 /* * Structs */ /* these are the options structure for command line parameters */ struct options { /* misc */ bool verbose; /* connection parameters */ char *dbname; char *hostname; char *port; char *username; /* version number */ int major; int minor; /* pid */ int pid; /* include leader and workers PIDs */ bool includeleaderworkers; /* frequency */ float interval; /* query and trace timestamps */ char *query_start; char *trace_start; }; /* * Global variables */ PGconn *conn; struct options *opts; extern char *optarg; /* * Function prototypes */ static void help(const char *progname); void get_opts(int, char **); #ifndef FE_MEMUTILS_H void *pg_malloc(size_t size); char *pg_strdup(const char *in); #endif PGconn *sql_conn(void); void fetch_version(void); bool backend_minimum_version(int major, int minor); void build_env(void); bool active_session(void); void handle_current_query(void); void drop_env(void); static void quit_properly(SIGNAL_ARGS); /* * Print help message */ static void help(const char *progname) { printf("%s gathers every wait events from a specific PID, grouping them by queries.\n\n" "Usage:\n" " %s [OPTIONS] PID\n" "\nGeneral options:\n" " -g include leader and workers (parallel queries) [v13+]\n" " -i interval (default is 1s)\n" " -v verbose\n" " -?|--help show this help, then exit\n" " -V|--version output version information, then exit\n" "\nConnection options:\n" " -h HOSTNAME database server host or socket directory\n" " -p PORT database server port number\n" " -U USER connect as specified database user\n" " -d DBNAME database to connect to\n\n" "Report bugs to .\n", progname, progname); } 
/* * Parse command line options and check for some usage errors */ void get_opts(int argc, char **argv) { int c; const char *progname; progname = get_progname(argv[0]); /* set the defaults */ opts->verbose = false; opts->dbname = NULL; opts->hostname = NULL; opts->port = NULL; opts->username = NULL; opts->pid = 0; opts->includeleaderworkers = false; opts->interval = 1; /* we should deal quickly with help and version */ if (argc > 1) { if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) { help(progname); exit(0); } if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) { puts("pgwaitevent " PGWAITEVENT_VERSION " (compiled with PostgreSQL " PG_VERSION ")"); exit(0); } } /* get options */ while ((c = getopt(argc, argv, "h:p:U:d:i:gv")) != -1) { switch (c) { /* specify the database */ case 'd': opts->dbname = pg_strdup(optarg); break; /* host to connect to */ case 'h': opts->hostname = pg_strdup(optarg); break; /* parallel queries */ case 'g': opts->includeleaderworkers = true; break; /* interval */ case 'i': opts->interval = atof(optarg); break; /* port to connect to on remote host */ case 'p': opts->port = pg_strdup(optarg); break; /* username */ case 'U': opts->username = pg_strdup(optarg); break; /* get verbose */ case 'v': opts->verbose = true; break; default: errx(1, "Try \"%s --help\" for more information.\n", progname); } } /* get PID to monitor */ if (optind < argc) { opts->pid = atoi(argv[optind]); } else { errx(1, "PID required.\nTry \"%s --help\" for more information.\n", progname); } /* set dbname if unset */ if (opts->dbname == NULL) { /* * We want to use dbname for possible error reports later, * and in case someone has set and is using PGDATABASE * in its environment, preserve that name for later usage */ if (!getenv("PGDATABASE")) opts->dbname = "postgres"; else opts->dbname = getenv("PGDATABASE"); } } #ifndef FE_MEMUTILS_H /* * "Safe" wrapper around malloc(). 
 * Exits the program on allocation failure instead of returning NULL.
 */
void *
pg_malloc(size_t size)
{
	void	   *tmp;

	/* Avoid unportable behavior of malloc(0) */
	if (size == 0)
		size = 1;
	tmp = malloc(size);
	if (!tmp)
	{
		fprintf(stderr, "out of memory\n");
		exit(EXIT_FAILURE);
	}
	return tmp;
}

/*
 * "Safe" wrapper around strdup().
 * Exits on NULL input or allocation failure.
 */
char *
pg_strdup(const char *in)
{
	char	   *tmp;

	if (!in)
	{
		fprintf(stderr,
				"cannot duplicate null pointer (internal error)\n");
		exit(EXIT_FAILURE);
	}
	tmp = strdup(in);
	if (!tmp)
	{
		fprintf(stderr, "out of memory\n");
		exit(EXIT_FAILURE);
	}
	return tmp;
}
#endif

/*
 * Establish the PostgreSQL connection
 *
 * Loops to re-prompt for a password when the server requires one.
 * Returns the live connection, or exits on failure.
 */
PGconn *
sql_conn()
{
	PGconn	   *my_conn;
	char	   *password = NULL;
	bool		new_pass;
#if PG_VERSION_NUM >= 90300
	const char **keywords;
	const char **values;
#else
	int			size;
	char	   *dns;
#endif
	char	   *message;

	/*
	 * Start the connection. Loop until we have a password if requested by
	 * backend.
	 */
	do
	{
#if PG_VERSION_NUM >= 90300
		/*
		 * We don't need to check if the database name is actually a complete
		 * connection string, PQconnectdbParams being smart enough to check
		 * this itself.
		 */
#define PARAMS_ARRAY_SIZE 8
		keywords = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*keywords));
		values = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*values));

		/*
		 * NOTE(review): keywords[6]/values[6] are never initialized while
		 * PARAMS_ARRAY_SIZE is 8 and the terminator is written at index 7 —
		 * PQconnectdbParams scans until a NULL keyword, so it can read an
		 * indeterminate pointer at slot 6. Set slot 6 (or terminate at 6 and
		 * size the arrays accordingly).
		 * NOTE(review): "values[0] = opts->hostname," ends with a comma
		 * operator rather than a semicolon; harmless today but looks like a
		 * typo.
		 */
		keywords[0] = "host";
		values[0] = opts->hostname,
		keywords[1] = "port";
		values[1] = opts->port;
		keywords[2] = "user";
		values[2] = opts->username;
		keywords[3] = "password";
		values[3] = password;
		keywords[4] = "dbname";
		values[4] = opts->dbname;
		keywords[5] = "fallback_application_name";
		values[5] = "pgwaitevent";
		keywords[7] = NULL;
		values[7] = NULL;

		my_conn = PQconnectdbParams(keywords, values, true);
#else
		/* 34 is the length of the fallback application name setting */
		size = 34;
		if (opts->hostname)
			size += strlen(opts->hostname) + 6;
		if (opts->port)
			size += strlen(opts->port) + 6;
		if (opts->username)
			size += strlen(opts->username) + 6;
		if (opts->dbname)
			size += strlen(opts->dbname) + 8;
		dns = pg_malloc(size);

		/*
		 * Checking the presence of a = sign is our way to check that the
		 * database name is actually a connection string. In such a case, we
		 * keep this string as the connection string, and add other parameters
		 * if they are supplied.
		 *
		 * NOTE(review): the sprintf(dns, "%s...", dns, ...) calls below pass
		 * the destination buffer as a source argument; overlapping source and
		 * destination in sprintf is undefined behavior per C99 7.19.6.6 —
		 * build with strcat/snprintf into the tail instead.
		 */
		sprintf(dns, "%s", "fallback_application_name='pgwaitevent' ");

		if (strchr(opts->dbname, '=') != NULL)
			sprintf(dns, "%s%s", dns, opts->dbname);
		else if (opts->dbname)
			sprintf(dns, "%sdbname=%s ", dns, opts->dbname);

		if (opts->hostname)
			sprintf(dns, "%shost=%s ", dns, opts->hostname);
		if (opts->port)
			sprintf(dns, "%sport=%s ", dns, opts->port);
		if (opts->username)
			sprintf(dns, "%suser=%s ", dns, opts->username);

		if (opts->verbose)
			printf("Connection string: %s\n", dns);

		my_conn = PQconnectdb(dns);
#endif

		new_pass = false;

		if (!my_conn)
		{
			errx(1, "could not connect to database %s\n", opts->dbname);
		}

#if PG_VERSION_NUM >= 80200
		if (PQstatus(my_conn) == CONNECTION_BAD &&
			PQconnectionNeedsPassword(my_conn) &&
			!password)
		{
			PQfinish(my_conn);
#if PG_VERSION_NUM < 100000
			password = simple_prompt("Password: ", 100, false);
#elif PG_VERSION_NUM < 140000
			/*
			 * NOTE(review): in this branch `password' is still NULL, yet this
			 * simple_prompt() signature writes the reply into the caller's
			 * buffer — it needs password = pg_malloc(100) first.
			 */
			simple_prompt("Password: ", password, 100, false);
#else
			password = simple_prompt("Password: ", false);
#endif
			new_pass = true;
		}
#endif
	}
	while (new_pass);

	if (password)
		free(password);

	/* check to see that the backend connection was successfully made */
	if (PQstatus(my_conn) == CONNECTION_BAD)
	{
		/*
		 * NOTE(review): PQerrorMessage() returns storage owned by the
		 * connection; PQfinish() frees it before errx() reads `message'
		 * (use-after-free). Copy the message before PQfinish.
		 */
		message = PQerrorMessage(my_conn);
		PQfinish(my_conn);
		errx(1, "could not connect to database %s: %s", opts->dbname, message);
	}

	/* return the conn if good */
	return my_conn;
}

/*
 * Fetch PostgreSQL major and minor numbers
 *
 * Parses the output of SELECT version() into opts->major/opts->minor.
 * NOTE(review): the "%*s %d.%d" sscanf pattern presumably fails on
 * development version strings such as "14devel" — confirm against the
 * servers this is expected to run on.
 */
void
fetch_version()
{
	char		sql[PGWAITEVENT_DEFAULT_STRING_SIZE];
	PGresult   *res;

	/* get the cluster version */
	snprintf(sql, sizeof(sql), "SELECT version()");

	/* make the call */
	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		warnx("pgwaitevent: query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		errx(1, "pgwaitevent: query was: %s", sql);
	}

	/* get the only row, and parse it to get major and minor numbers */
	sscanf(PQgetvalue(res, 0, 0), "%*s %d.%d", &(opts->major), &(opts->minor));

	/* print version */
	if (opts->verbose)
		printf("Detected
release: %d.%d\n", opts->major, opts->minor);

	/* cleanup */
	PQclear(res);
}

/*
 * Compare given major and minor numbers to the one of the connected server
 * Returns true when the server is at least major.minor.
 */
bool
backend_minimum_version(int major, int minor)
{
	return opts->major > major || (opts->major == major && opts->minor >= minor);
}

/*
 * Close the PostgreSQL connection, and quit
 * Installed as the SIGINT handler; drops the helper function first.
 */
static void
quit_properly(SIGNAL_ARGS)
{
	drop_env();
	PQfinish(conn);
	exit(1);
}

/*
 * Create function
 *
 * Builds the session-local environment: a temporary table collecting wait
 * event counters, plus the plpgsql function trace_wait_events_for_pid()
 * that samples pg_stat_activity for the target PID until it goes idle
 * (wait_event = 'ClientRead') and returns the aggregated statistics.
 */
void
build_env()
{
	char		sql[PGWAITEVENT_DEFAULT_STRING_SIZE];
	PGresult   *res;

	/* build the DDL query */
	snprintf(sql, sizeof(sql),
			 "CREATE TEMPORARY TABLE waitevents (we text, wet text, o integer);\n"
			 "ALTER TABLE waitevents ADD UNIQUE(we, wet);\n");

	/* make the call */
	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		warnx("pgwaitevent: query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		errx(1, "pgwaitevent: query was: %s", sql);
	}

	/* print verbose */
	if (opts->verbose)
		printf("Temporary table created\n");

	/* cleanup */
	PQclear(res);

	/* build the DDL query */
	snprintf(sql, sizeof(sql),
			 "CREATE OR REPLACE FUNCTION trace_wait_events_for_pid(p integer, leader boolean, s numeric default 1)\n"
			 "RETURNS TABLE (wait_event text, wait_event_type text, occurences integer, percent numeric(5,2))\n"
			 "LANGUAGE plpgsql\n"
			 "AS $$\n"
			 "DECLARE\n"
			 " q text;\n"
			 " r record;\n"
			 "BEGIN\n"
			 " -- check it is a backend\n"
			 " SELECT query INTO q FROM pg_stat_activity\n"
			 " WHERE pid=p AND backend_type='client backend' AND state='active';\n"
			 "\n"
			 " IF NOT FOUND THEN\n"
			 " RAISE EXCEPTION 'PID %% doesn''t appear to be an active backend', p\n"
			 " USING HINT = 'Check the PID and its state';\n"
			 " END IF;\n"
			 "\n"
			 " -- logging\n"
			 " RAISE LOG 'Tracing PID %%, sampling at %%s', p, s;\n"
			 " RAISE LOG 'Query is <%%>', q;\n"
			 "\n"
			 " -- drop if exists, then create temp table\n"
			 " TRUNCATE waitevents;\n"
			 "\n"
			 " -- loop till the end of the query\n"
			 " LOOP\n"
			 " -- get wait event\n"
			 " IF leader THEN\n"
			 " SELECT COALESCE(psa.wait_event, '[Running]') AS wait_event,\n"
			 " COALESCE(psa.wait_event_type, '') AS wait_event_type\n"
			 " INTO r\n"
			 " FROM pg_stat_activity psa\n"
			 " WHERE pid=p OR leader_pid=p;\n"
			 " ELSE\n"
			 " SELECT COALESCE(psa.wait_event, '[Running]') AS wait_event,\n"
			 " COALESCE(psa.wait_event_type, '') AS wait_event_type\n"
			 " INTO r\n"
			 " FROM pg_stat_activity psa\n"
			 " WHERE pid=p;\n"
			 " END IF;\n"
			 "\n"
			 " -- loop control\n"
			 " EXIT WHEN r.wait_event = 'ClientRead';\n"
			 "\n"
			 " -- update wait events stats\n"
			 " INSERT INTO waitevents VALUES (r.wait_event, r.wait_event_type, 1)\n"
			 " ON CONFLICT (we,wet) DO UPDATE SET o = waitevents.o+1;\n"
			 "\n"
			 " -- sleep a bit\n"
			 " PERFORM pg_sleep(s);\n"
			 " END LOOP;\n"
			 "\n"
			 " -- return stats\n"
			 " RETURN QUERY\n"
			 " SELECT we, wet, o, (o*100./sum(o) over ())::numeric(5,2)\n"
			 " FROM waitevents\n"
			 " ORDER BY o DESC;\n"
			 "END\n"
			 "$$;\n");

	/* make the call */
	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		warnx("pgwaitevent: query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		errx(1, "pgwaitevent: query was: %s", sql);
	}

	/* print verbose */
	if (opts->verbose)
		printf("Function created\n");

	/* cleanup */
	PQclear(res);
}

/*
 * Is PID an active session?
 *
 * Looks up opts->pid in pg_stat_activity.  Returns true (and records the
 * query and trace start timestamps in opts) when the backend is active;
 * exits the program when the PID no longer exists.
 */
bool
active_session()
{
	char		sql[PGWAITEVENT_DEFAULT_STRING_SIZE];
	PGresult   *res;
	bool		active = false;

	/* build the query */
	snprintf(sql, sizeof(sql),
			 "SELECT state, query, query_start, now() FROM pg_stat_activity\n"
			 "WHERE backend_type='client backend'\n"
			 "AND pid=%d",
			 opts->pid);

	/* make the call */
	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		warnx("pgwaitevent: query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		errx(1, "pgwaitevent: query was: %s", sql);
	}

	/* if zero row, then PID is gone */
	if (PQntuples(res) == 0)
	{
		printf("\nNo more session with PID %d, exiting...\n", opts->pid);
		drop_env();
		PQfinish(conn);
		exit(2);
	}

	/* if one row, we found the good one */
	if (PQntuples(res) == 1)
	{
		if (!strncmp(PQgetvalue(res, 0, 0), "active", 6))
		{
			active = true;
			printf("\nNew query: %s\n", PQgetvalue(res, 0, 1));
			/*
			 * NOTE(review): these strdup'd strings are never freed and are
			 * reassigned on every new query — a small leak per traced query.
			 */
			opts->query_start = pg_strdup(PQgetvalue(res, 0, 2));
			opts->trace_start = pg_strdup(PQgetvalue(res, 0, 3));
		}
	}

	/* this also means that in case of multiple rows, we treat it as no rows */

	/* cleanup */
	PQclear(res);

	return active;
}

/*
 * Handle query
 *
 * Runs trace_wait_events_for_pid() (which blocks until the traced query
 * finishes), then prints the query/trace durations, the optional process
 * count, and the per-wait-event statistics table.
 */
void
handle_current_query()
{
	char		sql[PGWAITEVENT_DEFAULT_STRING_SIZE];
	PGresult   *workers_res;
	PGresult   *trace_res;
	PGresult   *duration_res;
	int			nrows;
	int			row;
	int			nworkers = 0;

	if (opts->includeleaderworkers)
	{
		/* build the workers query if the user asked to include leader and workers */
		snprintf(sql, sizeof(sql),
				 "SELECT count(*) FROM pg_stat_activity "
				 "WHERE pid=%d OR leader_pid=%d",
				 opts->pid, opts->pid);

		/* execute it */
		workers_res = PQexec(conn, sql);

		/* check and deal with errors */
		if (!workers_res || PQresultStatus(workers_res) > 2)
		{
			warnx("pgwaitevent: query failed: %s", PQerrorMessage(conn));
			PQclear(workers_res);
			PQfinish(conn);
			errx(1, "pgwaitevent: query was: %s", sql);
		}

		/* get the number of leader and workers */
		nworkers = atoi(PQgetvalue(workers_res, 0, 0));

		/* clean up */
		PQclear(workers_res);
	}

	/* build the trace query */
	snprintf(sql, sizeof(sql),
			 "SELECT * FROM trace_wait_events_for_pid(%d, %s, %f);",
			 opts->pid,
			 opts->includeleaderworkers ? "'t'" : "'f'",
			 opts->interval);

	/* execute it */
	trace_res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!trace_res || PQresultStatus(trace_res) > 2)
	{
		warnx("pgwaitevent: query failed: %s", PQerrorMessage(conn));
		PQclear(trace_res);
		PQfinish(conn);
		errx(1, "pgwaitevent: query was: %s", sql);
	}

	/* build the duration query */
	snprintf(sql, sizeof(sql),
			 "SELECT now()-'%s'::timestamptz, now()-'%s'::timestamptz;",
			 opts->query_start, opts->trace_start);

	/* execute it */
	duration_res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!duration_res || PQresultStatus(duration_res) > 2)
	{
		warnx("pgwaitevent: query failed: %s", PQerrorMessage(conn));
		PQclear(duration_res);
		PQfinish(conn);
		errx(1, "pgwaitevent: query was: %s", sql);
	}

	/* show durations */
	(void)printf("Query duration: %s\n", PQgetvalue(duration_res, 0, 0));
	(void)printf("Trace duration: %s\n", PQgetvalue(duration_res, 0, 1));

	/* show number of workers */
	if (opts->includeleaderworkers)
	{
		(void)printf("Number of processes: %d\n", nworkers);
	}

	/* get the number of rows */
	nrows = PQntuples(trace_res);

	/* print headers */
	(void)printf(
"┌───────────────────────────────────┬───────────┬────────────┬─────────┐\n"
"│ Wait event │ WE type │ Occurences │ Percent │\n"
"├───────────────────────────────────┼───────────┼────────────┼─────────┤\n");

	/* for each row, print all columns in a row */
	for (row = 0; row < nrows; row++)
	{
		(void)printf("│ %-33s │ %-9s │ %10ld │ %6.2f │\n",
					 PQgetvalue(trace_res, row, 0),
					 PQgetvalue(trace_res, row, 1),
					 atol(PQgetvalue(trace_res, row, 2)),
					 atof(PQgetvalue(trace_res, row, 3))
					 );
	}

	/* print footers */
	(void)printf(
"└───────────────────────────────────┴───────────┴────────────┴─────────┘\n");

	/* cleanup */
	PQclear(duration_res);
	PQclear(trace_res);
}

/*
 * Drop env
 * Removes the helper function; the temporary table disappears with the
 * session, so only the function needs an explicit DROP.
 */
void
drop_env()
{
	char		sql[PGWAITEVENT_DEFAULT_STRING_SIZE];
	PGresult   *res;

	/* no need to drop the temp table */

	/* drop function */
	snprintf(sql, sizeof(sql),
			 "DROP FUNCTION trace_wait_events_for_pid(integer, boolean, numeric)");

	/* make the call */
	res = PQexec(conn, sql);

	/* check and deal with errors */
	if (!res || PQresultStatus(res) > 2)
	{
		warnx("pgwaitevent: query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		errx(1, "pgwaitevent: query was: %s", sql);
	}

	/* print verbose */
	if (opts->verbose)
		printf("Function dropped\n");

	/* cleanup */
	PQclear(res);
}

/*
 * Main function
 */
int
main(int argc, char **argv)
{
	/*
	 * If the user stops the program,
	 * quit nicely.
	 */
	pqsignal(SIGINT, quit_properly);

	/* Allocate the options struct */
	opts = (struct options *) pg_malloc(sizeof(struct options));

	/* Parse the options */
	get_opts(argc, argv);

	/* Connect to the database */
	conn = sql_conn();

	/* Fetch version */
	fetch_version();

	/* Check options */
	if (opts->includeleaderworkers && !backend_minimum_version(13, 0))
	{
		errx(1, "You need at least v13 to include workers' wait events.");
	}

	/* Create the trace_wait_events_for_pid function */
	build_env();

	/* show what we're doing */
	printf("Tracing wait events for PID %d, sampling at %.3fs, %s\n",
		   opts->pid,
		   opts->interval,
		   opts->includeleaderworkers ? "including leader and workers" : "PID only");

	while(true)
	{
		if (active_session())
		{
			/* Handle query currently executed */
			handle_current_query();
		}

		/* wait 100ms */
		(void)usleep(100000);
	}

	/*
	 * NOTE(review): this tail is unreachable — the while(true) loop above
	 * only exits via quit_properly() (SIGINT) or exit(2) in
	 * active_session(), both of which already clean up.
	 */
	/* Drop the function */
	drop_env();

	PQfinish(conn);
	return 0;
}