pg_repack-ver_1.5.0/.github/workflows/regression.yml

name: make installcheck
on: [push, pull_request]

jobs:
  test:
    strategy:
      fail-fast: false
      matrix:
        pg:
          - 16
          - 15
          - 14
          - 13
          - 12
          - 11
          - 10

    name: PostgreSQL ${{ matrix.pg }}
    runs-on: ubuntu-latest
    container: pgxn/pgxn-tools
    steps:
      - name: Start PostgreSQL ${{ matrix.pg }}
        run: pg-start ${{ matrix.pg }}
      - name: Install build-dependencies
        run: apt-get install -y liblz4-dev libreadline-dev zlib1g-dev libzstd-dev
      - name: Check out the repo
        uses: actions/checkout@v3
      - name: Put pg_repack on PATH
        run: echo "$PWD/bin" >> $GITHUB_PATH
      - name: Create testts directory
        run: sudo -u postgres mkdir /tmp/testts
      - name: Create testts tablespace
        run: sudo -u postgres psql -c "CREATE TABLESPACE testts LOCATION '/tmp/testts'"
      - name: Test on PostgreSQL ${{ matrix.pg }}
        run: pg-build-test
      - name: Show regression.diffs
        if: ${{ failure() }}
        run: cat regress/regression.diffs

pg_repack-ver_1.5.0/.gitignore

# Global excludes across all subdirectories
*.o
*.so
*.bc
regress/regression.diffs
regress/regression.out
regress/results/
dist/*.zip
lib/exports.list

pg_repack-ver_1.5.0/COPYRIGHT

Portions Copyright (c) 2008-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
Portions Copyright (c) 2011, Itagaki Takahiro
Portions Copyright (c) 2012-2020, The Reorg Development Team
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the authors nor the names of its contributors may
      be used to endorse or promote products derived from this software
      without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
pg_repack-ver_1.5.0/META.json

{
    "name": "pg_repack",
    "abstract": "PostgreSQL module for data reorganization",
    "description": "Reorganize tables in PostgreSQL databases with minimal locks",
    "version": "1.5.0",
    "maintainer": [
        "Beena Emerson",
        "Josh Kupershmidt",
        "Masahiko Sawada",
        "Daniele Varrazzo",
        "Artur Zakirov",
        "Andreas Scherbaum"
    ],
    "tags": [ "bloat", "maintenance", "vacuum", "cluster" ],
    "release_status": "stable",
    "license": "bsd",
    "provides": {
        "pg_repack": {
            "file": "lib/pg_repack.sql",
            "version": "1.5.0",
            "abstract": "Reorganize tables in PostgreSQL databases with minimal locks"
        }
    },
    "prereqs": {
        "runtime": {
            "requires": {
                "PostgreSQL": "9.4.0"
            }
        }
    },
    "resources": {
        "homepage": "https://reorg.github.io/pg_repack",
        "bugtracker": {
            "web": "https://github.com/reorg/pg_repack/issues"
        },
        "repository": {
            "url": "git://github.com/reorg/pg_repack.git",
            "web": "https://github.com/reorg/pg_repack/",
            "type": "git"
        }
    },
    "meta-spec": {
        "version": "1.0.0",
        "url": "https://pgxn.org/meta/spec.txt"
    }
}

pg_repack-ver_1.5.0/Makefile

#
# pg_repack: Makefile
#
# Portions Copyright (c) 2008-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
# Portions Copyright (c) 2011, Itagaki Takahiro
# Portions Copyright (c) 2012-2020, The Reorg Development Team
#

PG_CONFIG ?= pg_config
EXTENSION = pg_repack

.PHONY: dist/$(EXTENSION)-$(EXTVERSION).zip

# Pull out PostgreSQL version number from pg_config
VERSION := $(shell $(PG_CONFIG) --version | sed 's/.* \([[:digit:].]\{1,\}\).*/\1/')
ifeq ("$(VERSION)","")
$(error pg_config not found)
endif

# PostgreSQL version as a number, e.g. 9.1.4 -> 901
INTVERSION := $(shell echo $$(($$(echo $(VERSION).0 | sed 's/\([[:digit:]]\{1,\}\)\.\([[:digit:]]\{1,\}\).*/\1*100+\2/'))))

# The version number of the library
EXTVERSION = $(shell grep '"version":' META.json | head -1 \
	| sed -e 's/[ ]*"version":[ ]*"\(.*\)",/\1/')

# NOTE: keep consistent with META.json
ifeq ($(shell echo $$(($(INTVERSION) < 904))),1)
$(error $(EXTENSION) requires PostgreSQL 9.4 or later. This is $(VERSION))
endif

SUBDIRS = bin lib regress

all install installdirs uninstall distprep clean distclean maintainer-clean debug:
	@for dir in $(SUBDIRS); do \
		$(MAKE) -C $$dir $@ || exit; \
	done

# We'd like check operations to run all the subtests before failing.
check installcheck:
	@CHECKERR=0; for dir in $(SUBDIRS); do \
		$(MAKE) -C $$dir $@ || CHECKERR=$$?; \
	done; \
	exit $$CHECKERR

# Prepare the package for PGXN submission
package: dist dist/$(EXTENSION)-$(EXTVERSION).zip

dist:
	mkdir -p dist

dist/$(EXTENSION)-$(EXTVERSION).zip:
	git archive --format zip --prefix=$(EXTENSION)-$(EXTVERSION)/ --output $@ master
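The INTVERSION arithmetic above is compact enough to deserve a worked example: the sed expression keeps the first two dotted components of the pg_config version and evaluates major*100 + minor, so "9.1.4" becomes 901 and "16.1" becomes 1601, both comparable against the 904 cutoff. A small illustrative C sketch of the same computation (the helper is hypothetical, not part of the build):

```c
#include <stdio.h>

/* Mirror the Makefile: take the first two dotted components of the
 * pg_config version and compute major*100 + minor ("9.1.4" -> 901,
 * "16.1" -> 1601; a bare "16" is padded with ".0" -> 1600). */
static int intversion(const char *version)
{
    int major = 0, minor = 0;
    sscanf(version, "%d.%d", &major, &minor);   /* minor stays 0 if absent */
    return major * 100 + minor;
}

int main(void)
{
    const char *samples[] = { "9.1.4", "9.4.0", "16.1", "16" };
    for (int i = 0; i < 4; i++)
        printf("%-6s -> %d (%s 9.4)\n", samples[i], intversion(samples[i]),
               intversion(samples[i]) < 904 ? "older than" : "at least");
    return 0;
}
```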
pg_repack-ver_1.5.0/README.rst

pg_repack -- Reorganize tables in PostgreSQL databases with minimal locks
==========================================================================

- Homepage: https://reorg.github.io/pg_repack
- Download: https://pgxn.org/dist/pg_repack/
- Development: https://github.com/reorg/pg_repack
- Bug Report: https://github.com/reorg/pg_repack/issues

|GitHub Actions|

.. |GitHub Actions| image:: https://github.com/reorg/pg_repack/actions/workflows/regression.yml/badge.svg
   :target: https://github.com/reorg/pg_repack/actions/workflows/regression.yml
   :alt: Linux build status

pg_repack_ is a PostgreSQL extension which lets you remove bloat from
tables and indexes, and optionally restore the physical order of clustered
indexes. Unlike CLUSTER_ and `VACUUM FULL`_ it works online, without
holding an exclusive lock on the processed tables during processing.
pg_repack is efficient to boot, with performance comparable to using
CLUSTER directly.

Please check the documentation (in the ``doc`` directory or online_) for
installation and usage instructions.

.. _pg_repack: https://reorg.github.io/pg_repack
.. _CLUSTER: https://www.postgresql.org/docs/current/static/sql-cluster.html
.. _VACUUM FULL: VACUUM_
.. _VACUUM: https://www.postgresql.org/docs/current/static/sql-vacuum.html
.. _online: pg_repack_
.. _issue: https://github.com/reorg/pg_repack/issues/23

What about pg_reorg?
--------------------

pg_repack is a fork of the pg_reorg_ project, which has proven hugely
successful. Unfortunately new feature development on pg_reorg_ has slowed
or stopped since late 2011.

pg_repack was initially released as a drop-in replacement for pg_reorg,
addressing some of the shortcomings of the last pg_reorg version (such as
support for PostgreSQL 9.2 and EXTENSION packaging) and known bugs.

pg_repack 1.2 introduces further new features (parallel index builds,
ability to rebuild only indexes) and bugfixes. In some cases its behaviour
may be different from the 1.1.x release so it shouldn't be considered
a drop-in replacement: you are advised to check the documentation__
before upgrading from previous versions.

.. __: pg_repack_
.. _pg_reorg: https://github.com/reorg/pg_reorg

pg_repack-ver_1.5.0/bin/.gitignore

/.deps/
/pg_repack
/results/
/sql/init.sql
/sql/init-*.*.sql

pg_repack-ver_1.5.0/bin/Makefile

#
# pg_repack: bin/Makefile
#
# Portions Copyright (c) 2008-2012, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
# Portions Copyright (c) 2011, Itagaki Takahiro
# Portions Copyright (c) 2012-2020, The Reorg Development Team
#
PG_CONFIG ?= pg_config

SRCS = pg_repack.c pgut/pgut.c pgut/pgut-fe.c
OBJS = $(SRCS:.c=.o)
PROGRAM = pg_repack

# The version number of the program. It should be the same as the library's.
REPACK_VERSION = $(shell grep '"version":' ../META.json | head -1 \
	| sed -e 's/[ ]*"version":[ ]*"\(.*\)",/\1/')

PG_CPPFLAGS = -I$(libpq_srcdir) -DREPACK_VERSION=$(REPACK_VERSION)

ifdef DEBUG_REPACK
PG_CPPFLAGS += -DDEBUG_REPACK
endif

PG_LIBS = $(libpq)

# libs pgport, pgcommon moved somewhere else in some ubuntu version
# see ticket #179
PG_LIBS += -L$(shell $(PG_CONFIG) --pkglibdir)

USE_PGXS = 1	# use pgxs if not in contrib directory
PGXS := $(shell $(PG_CONFIG) --pgxs)
include $(PGXS)

# remove dependency on libxml2, libxslt, and libpam.
# XXX: find a better way to make sure we are linking with libraries
# from pg_config which we actually need.
LIBS := $(filter-out -ledit -lgssapi_krb5 -lpam -lselinux -lxml2 -lxslt, $(LIBS))

pg_repack-ver_1.5.0/bin/pg_repack.c

/*
 * pg_repack.c: bin/pg_repack.c
 *
 * Portions Copyright (c) 2008-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
 * Portions Copyright (c) 2011, Itagaki Takahiro
 * Portions Copyright (c) 2012-2020, The Reorg Development Team
 */

/**
 * @brief Client Modules
 */

const char *PROGRAM_URL = "https://reorg.github.io/pg_repack/";
const char *PROGRAM_ISSUES = "https://github.com/reorg/pg_repack/issues";

#ifdef REPACK_VERSION
/* macro trick to stringify a macro expansion */
#define xstr(s) str(s)
#define str(s) #s
const char *PROGRAM_VERSION = xstr(REPACK_VERSION);
#else
const char *PROGRAM_VERSION = "unknown";
#endif

#include "pgut/pgut-fe.h"

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#ifdef HAVE_SYS_POLL_H
#include <sys/poll.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

/*
 * APPLY_COUNT: Number of applied logs per transaction. Larger values
 * can be faster, but produce longer transactions in the REDO phase.
 */
#define APPLY_COUNT		1000

/* Once we get down to seeing fewer than this many tuples in the
 * log table, we'll say that we're ready to perform the switch.
 */
#define SWITCH_THRESHOLD_DEFAULT	100

/* poll() or select() timeout, in seconds */
#define POLL_TIMEOUT	3
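These two constants drive the replay loop in step 4 further down: batches of up to APPLY_COUNT log rows are applied until one batch comes back no larger than the switch threshold, at which point the remaining backlog is small enough to finish under the exclusive lock. A minimal self-contained sketch of that termination rule (mock_apply_log and its numbers are invented for illustration):

```c
#include <stdio.h>

#define APPLY_COUNT              1000
#define SWITCH_THRESHOLD_DEFAULT 100

/* Stand-in for repack.repack_apply(): drain up to 'limit' rows from a
 * mock backlog while concurrent writes keep trickling in. */
static int mock_apply_log(int *backlog, int limit)
{
    int applied = (*backlog < limit) ? *backlog : limit;
    *backlog -= applied;
    *backlog += 37;              /* pretend concurrent DML adds more rows */
    return applied;
}

int main(void)
{
    int backlog = 250000, num, rounds = 0;

    /* Same shape as pg_repack's step 4: keep applying batches until one
     * batch is small enough that the final catch-up can happen quickly
     * while holding the ACCESS EXCLUSIVE lock. */
    do {
        num = mock_apply_log(&backlog, APPLY_COUNT);
        rounds++;
    } while (num > SWITCH_THRESHOLD_DEFAULT);

    printf("switched after %d rounds, %d rows left for final apply\n",
           rounds, backlog);
    return 0;
}
```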
/* Compile an array of existing transactions which are active during
 * pg_repack's setup. Some transactions we can safely ignore:
 *  a. The '1/1, -1/0' lock skipped is from the bgwriter on newly promoted
 *     servers. See https://github.com/reorg/pg_reorg/issues/1
 *  b. Our own database connections
 *  c. Other pg_repack clients, as distinguished by application_name, which
 *     may be operating on other tables at the same time. See
 *     https://github.com/reorg/pg_repack/issues/1
 *  d. open transactions/locks existing on other databases than the actual
 *     processing relation (except for locks on shared objects)
 *  e. VACUUMs which are always executed outside transaction blocks.
 *
 * Note, there is some redundancy in how the filtering is done (e.g. excluding
 * based on pg_backend_pid() and application_name), but that shouldn't hurt
 * anything. Also, the test of application_name is not bulletproof -- for
 * instance, the application name when running installcheck will be
 * pg_regress.
 */
#define SQL_XID_SNAPSHOT_90200 \
	"SELECT coalesce(array_agg(l.virtualtransaction), '{}') " \
	"  FROM pg_locks AS l " \
	"  LEFT JOIN pg_stat_activity AS a " \
	"    ON l.pid = a.pid " \
	"  LEFT JOIN pg_database AS d " \
	"    ON a.datid = d.oid " \
	"  WHERE l.locktype = 'virtualxid' " \
	"  AND l.pid NOT IN (pg_backend_pid(), $1) " \
	"  AND (l.virtualxid, l.virtualtransaction) <> ('1/1', '-1/0') " \
	"  AND (a.application_name IS NULL OR a.application_name <> $2)" \
	"  AND a.query !~* E'^\\\\s*vacuum\\\\s+' " \
	"  AND a.query !~ E'^autovacuum: ' " \
	"  AND ((d.datname IS NULL OR d.datname = current_database()) OR l.database = 0)"

#define SQL_XID_SNAPSHOT_90000 \
	"SELECT coalesce(array_agg(l.virtualtransaction), '{}') " \
	"  FROM pg_locks AS l " \
	"  LEFT JOIN pg_stat_activity AS a " \
	"    ON l.pid = a.procpid " \
	"  LEFT JOIN pg_database AS d " \
	"    ON a.datid = d.oid " \
	"  WHERE l.locktype = 'virtualxid' " \
	"  AND l.pid NOT IN (pg_backend_pid(), $1) " \
	"  AND (l.virtualxid, l.virtualtransaction) <> ('1/1', '-1/0') " \
	"  AND (a.application_name IS NULL OR a.application_name <> $2)" \
	"  AND a.current_query !~* E'^\\\\s*vacuum\\\\s+' " \
	"  AND a.current_query !~ E'^autovacuum: ' " \
	"  AND ((d.datname IS NULL OR d.datname = current_database()) OR l.database = 0)"

/* application_name is not available before 9.0. The last clause of
 * the WHERE clause is just to eat the $2 parameter (application name).
 */
#define SQL_XID_SNAPSHOT_80300 \
	"SELECT coalesce(array_agg(l.virtualtransaction), '{}') " \
	"  FROM pg_locks AS l" \
	"  LEFT JOIN pg_stat_activity AS a " \
	"    ON l.pid = a.procpid " \
	"  LEFT JOIN pg_database AS d " \
	"    ON a.datid = d.oid " \
	"  WHERE l.locktype = 'virtualxid' AND l.pid NOT IN (pg_backend_pid(), $1)" \
	"  AND (l.virtualxid, l.virtualtransaction) <> ('1/1', '-1/0') " \
	"  AND a.current_query !~* E'^\\\\s*vacuum\\\\s+' " \
	"  AND a.current_query !~ E'^autovacuum: ' " \
	"  AND ((d.datname IS NULL OR d.datname = current_database()) OR l.database = 0)" \
	"  AND ($2::text IS NOT NULL)"

#define SQL_XID_SNAPSHOT \
	(PQserverVersion(connection) >= 90200 ? SQL_XID_SNAPSHOT_90200 : \
	 (PQserverVersion(connection) >= 90000 ? SQL_XID_SNAPSHOT_90000 : \
	  SQL_XID_SNAPSHOT_80300))

/* Later, check whether any of the transactions we saw before are still
 * alive, and wait for them to go away.
 */
#define SQL_XID_ALIVE \
	"SELECT pid FROM pg_locks WHERE locktype = 'virtualxid'" \
	" AND pid <> pg_backend_pid() AND virtualtransaction = ANY($1)"
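The SQL_XID_SNAPSHOT ternary macro dispatches on PQserverVersion(), which encodes the server version as a single integer (e.g. 90200 for 9.2.0, 150004 for 15.4). A function-style equivalent, shown only to make the selection logic explicit (the three query macros are assumed to be in scope):

```c
/* Purely illustrative rewrite of the SQL_XID_SNAPSHOT macro above. */
static const char *
xid_snapshot_query(PGconn *conn)
{
    int sver = PQserverVersion(conn);   /* e.g. 9.2.0 -> 90200 */

    if (sver >= 90200)
        return SQL_XID_SNAPSHOT_90200;  /* pg_stat_activity.query exists */
    if (sver >= 90000)
        return SQL_XID_SNAPSHOT_90000;  /* application_name, but current_query */
    return SQL_XID_SNAPSHOT_80300;      /* no application_name before 9.0 */
}
```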
/* To be run while our main connection holds an AccessExclusive lock on the
 * target table, and our secondary conn is attempting to grab an AccessShare
 * lock. We know that "granted" must be false for these queries because
 * we already hold the AccessExclusive lock. Also, we only care about other
 * transactions trying to grab an ACCESS EXCLUSIVE lock, because we are only
 * trying to kill off disallowed DDL commands, e.g. ALTER TABLE or TRUNCATE.
 */
#define CANCEL_COMPETING_LOCKS \
	"SELECT pg_cancel_backend(pid) FROM pg_locks WHERE locktype = 'relation'" \
	" AND granted = false AND relation = %u" \
	" AND mode = 'AccessExclusiveLock' AND pid <> pg_backend_pid()"

#define KILL_COMPETING_LOCKS \
	"SELECT pg_terminate_backend(pid) " \
	"FROM pg_locks WHERE locktype = 'relation'" \
	" AND granted = false AND relation = %u" \
	" AND mode = 'AccessExclusiveLock' AND pid <> pg_backend_pid()"

#define COUNT_COMPETING_LOCKS \
	"SELECT pid FROM pg_locks WHERE locktype = 'relation'" \
	" AND granted = false AND relation = %u" \
	" AND mode = 'AccessExclusiveLock' AND pid <> pg_backend_pid()"

/* Will be used as a unique prefix for advisory locks. */
#define REPACK_LOCK_PREFIX_STR "16185446"

typedef enum
{
	UNPROCESSED,
	INPROGRESS,
	FINISHED
} index_status_t;

/*
 * per-index information
 */
typedef struct repack_index
{
	Oid				target_oid;		/* target: OID */
	const char	   *create_index;	/* CREATE INDEX */
	index_status_t	status;			/* Track parallel build statuses. */
	int				worker_idx;		/* which worker conn is handling */
} repack_index;

/*
 * per-table information
 */
typedef struct repack_table
{
	const char	   *target_name;	/* target: relname */
	Oid				target_oid;		/* target: OID */
	Oid				target_toast;	/* target: toast OID */
	Oid				target_tidx;	/* target: toast index OID */
	Oid				pkid;			/* target: PK OID */
	Oid				ckid;			/* target: CK OID */
	const char	   *create_pktype;	/* CREATE TYPE pk */
	const char	   *create_log;		/* CREATE TABLE log */
	const char	   *create_trigger;	/* CREATE TRIGGER repack_trigger */
	const char	   *enable_trigger;	/* ALTER TABLE ENABLE ALWAYS TRIGGER repack_trigger */
	const char	   *create_table;	/* CREATE TABLE table AS SELECT WITH NO DATA */
	const char	   *dest_tablespace;	/* Destination tablespace */
	const char	   *copy_data;		/* INSERT INTO */
	const char	   *alter_col_storage;	/* ALTER TABLE ALTER COLUMN SET STORAGE */
	const char	   *drop_columns;	/* ALTER TABLE DROP COLUMNs */
	const char	   *delete_log;		/* DELETE FROM log */
	const char	   *lock_table;		/* LOCK TABLE table */
	const char	   *sql_peek;		/* SQL used in flush */
	const char	   *sql_insert;		/* SQL used in flush */
	const char	   *sql_delete;		/* SQL used in flush */
	const char	   *sql_update;		/* SQL used in flush */
	const char	   *sql_pop;		/* SQL used in flush */
	int				n_indexes;		/* number of indexes */
	repack_index   *indexes;		/* info on each index */
} repack_table;

static bool is_superuser(void);
static void check_tablespace(void);
static bool preliminary_checks(char *errbuf, size_t errsize);
static bool is_requested_relation_exists(char *errbuf, size_t errsize);
static void repack_all_databases(const char *order_by);
static bool repack_one_database(const char *order_by, char *errbuf, size_t errsize);
static void repack_one_table(repack_table *table, const char *order_by);
static bool repack_table_indexes(PGresult *index_details);
static bool repack_all_indexes(char *errbuf, size_t errsize);
static void repack_cleanup(bool fatal, const repack_table *table);
static void repack_cleanup_callback(bool fatal, void *userdata);
static bool rebuild_indexes(const repack_table *table);

static char *getstr(PGresult *res, int row, int col);
static Oid getoid(PGresult *res, int row, int col);
static bool advisory_lock(PGconn *conn, const char *relid);
static bool lock_exclusive(PGconn *conn, const char *relid, const char *lock_query, bool start_xact);
static bool kill_ddl(PGconn *conn, Oid relid, bool terminate);
static bool lock_access_share(PGconn *conn, Oid relid, const char *target_name);
"42883" #define SQLSTATE_LOCK_NOT_AVAILABLE "55P03" static bool sqlstate_equals(PGresult *res, const char *state) { return strcmp(PQresultErrorField(res, PG_DIAG_SQLSTATE), state) == 0; } static bool analyze = true; static bool alldb = false; static bool noorder = false; static SimpleStringList parent_table_list = {NULL, NULL}; static SimpleStringList table_list = {NULL, NULL}; static SimpleStringList schema_list = {NULL, NULL}; static char *orderby = NULL; static char *tablespace = NULL; static bool moveidx = false; static SimpleStringList r_index = {NULL, NULL}; static bool only_indexes = false; static int wait_timeout = 60; /* in seconds */ static int jobs = 0; /* number of concurrent worker conns. */ static bool dryrun = false; static unsigned int temp_obj_num = 0; /* temporary objects counter */ static bool no_kill_backend = false; /* abandon when timed-out */ static bool no_superuser_check = false; static SimpleStringList exclude_extension_list = {NULL, NULL}; /* don't repack tables of these extensions */ static bool error_on_invalid_index = false; /* don't repack when invalid index is found */ static int switch_threshold = SWITCH_THRESHOLD_DEFAULT; /* buffer should have at least 11 bytes */ static char * utoa(unsigned int value, char *buffer) { sprintf(buffer, "%u", value); return buffer; } static pgut_option options[] = { { 'b', 'a', "all", &alldb }, { 'l', 't', "table", &table_list }, { 'l', 'I', "parent-table", &parent_table_list }, { 'l', 'c', "schema", &schema_list }, { 'b', 'n', "no-order", &noorder }, { 'b', 'N', "dry-run", &dryrun }, { 's', 'o', "order-by", &orderby }, { 's', 's', "tablespace", &tablespace }, { 'b', 'S', "moveidx", &moveidx }, { 'l', 'i', "index", &r_index }, { 'b', 'x', "only-indexes", &only_indexes }, { 'i', 'T', "wait-timeout", &wait_timeout }, { 'B', 'Z', "no-analyze", &analyze }, { 'i', 'j', "jobs", &jobs }, { 'b', 'D', "no-kill-backend", &no_kill_backend }, { 'b', 'k', "no-superuser-check", &no_superuser_check }, { 'l', 'C', "exclude-extension", &exclude_extension_list }, { 'b', 2, "error-on-invalid-index", &error_on_invalid_index }, { 'i', 1, "switch-threshold", &switch_threshold }, { 0 }, }; int main(int argc, char *argv[]) { int i; char errbuf[256]; i = pgut_getopt(argc, argv, options); if (i == argc - 1) dbname = argv[i]; else if (i < argc) ereport(ERROR, (errcode(EINVAL), errmsg("too many arguments"))); check_tablespace(); if (dryrun) elog(INFO, "Dry run enabled, not executing repack"); if (r_index.head || only_indexes) { if (r_index.head && table_list.head) ereport(ERROR, (errcode(EINVAL), errmsg("cannot specify --index (-i) and --table (-t)"))); if (r_index.head && parent_table_list.head) ereport(ERROR, (errcode(EINVAL), errmsg("cannot specify --index (-i) and --parent-table (-I)"))); else if (r_index.head && only_indexes) ereport(ERROR, (errcode(EINVAL), errmsg("cannot specify --index (-i) and --only-indexes (-x)"))); else if (r_index.head && exclude_extension_list.head) ereport(ERROR, (errcode(EINVAL), errmsg("cannot specify --index (-i) and --exclude-extension (-C)"))); else if (only_indexes && !(table_list.head || parent_table_list.head)) ereport(ERROR, (errcode(EINVAL), errmsg("cannot repack all indexes of database, specify the table(s)" "via --table (-t) or --parent-table (-I)"))); else if (only_indexes && exclude_extension_list.head) ereport(ERROR, (errcode(EINVAL), errmsg("cannot specify --only-indexes (-x) and --exclude-extension (-C)"))); else if (alldb) ereport(ERROR, (errcode(EINVAL), errmsg("cannot repack specific index(es) in all 
		else
		{
			if (orderby)
				ereport(WARNING, (errcode(EINVAL),
					errmsg("option -o (--order-by) has no effect while repacking indexes")));
			else if (noorder)
				ereport(WARNING, (errcode(EINVAL),
					errmsg("option -n (--no-order) has no effect while repacking indexes")));
			else if (!analyze)
				ereport(WARNING, (errcode(EINVAL),
					errmsg("ANALYZE is not performed after repacking indexes, -z (--no-analyze) has no effect")));
			else if (jobs)
				ereport(WARNING, (errcode(EINVAL),
					errmsg("option -j (--jobs) has no effect, repacking indexes does not use parallel jobs")));

			if (!repack_all_indexes(errbuf, sizeof(errbuf)))
				ereport(ERROR, (errcode(ERROR), errmsg("%s", errbuf)));
		}
	}
	else
	{
		if (schema_list.head && (table_list.head || parent_table_list.head))
			ereport(ERROR, (errcode(EINVAL),
				errmsg("cannot repack specific table(s) in schema, use schema.table notation instead")));

		if (exclude_extension_list.head && table_list.head)
			ereport(ERROR, (errcode(EINVAL),
				errmsg("cannot specify --table (-t) and --exclude-extension (-C)")));

		if (exclude_extension_list.head && parent_table_list.head)
			ereport(ERROR, (errcode(EINVAL),
				errmsg("cannot specify --parent-table (-I) and --exclude-extension (-C)")));

		if (noorder)
			orderby = "";

		if (alldb)
		{
			if (table_list.head || parent_table_list.head)
				ereport(ERROR, (errcode(EINVAL),
					errmsg("cannot repack specific table(s) in all databases")));
			if (schema_list.head)
				ereport(ERROR, (errcode(EINVAL),
					errmsg("cannot repack specific schema(s) in all databases")));

			repack_all_databases(orderby);
		}
		else
		{
			if (!repack_one_database(orderby, errbuf, sizeof(errbuf)))
				ereport(ERROR, (errcode(ERROR),
					errmsg("%s failed with error: %s", PROGRAM_NAME, errbuf)));
		}
	}

	return 0;
}

/*
 * Test if the current user is a database superuser.
 * Borrowed from psql/common.c
 *
 * Note: this will correctly detect superuserness only with a protocol-3.0
 * or newer backend; otherwise it will always say "false".
 */
bool
is_superuser(void)
{
	const char *val;

	if (no_superuser_check)
		return true;

	if (!connection)
		return false;

	val = PQparameterStatus(connection, "is_superuser");

	if (val && strcmp(val, "on") == 0)
		return true;

	return false;
}
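is_superuser() relies on the fact that a protocol-3.0 server reports is_superuser as a run-time parameter on every connection, so no extra query is needed. A standalone libpq illustration of the same check (connection parameters come from the usual PG* environment variables):

```c
#include <stdio.h>
#include <string.h>
#include <libpq-fe.h>

int main(void)
{
    PGconn *conn = PQconnectdb("");   /* uses PGHOST/PGUSER/PGDATABASE etc. */

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    /* Reported by the server in the startup packet; "on" means superuser. */
    const char *val = PQparameterStatus(conn, "is_superuser");
    printf("is_superuser: %s\n", (val && strcmp(val, "on") == 0) ? "yes" : "no");

    PQfinish(conn);
    return 0;
}
```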
/*
 * Check if the requested tablespace exists.
 *
 * Raise an exception on error.
 */
void
check_tablespace()
{
	PGresult	   *res = NULL;
	const char	   *params[1];

	if (tablespace == NULL)
	{
		/* nothing to check, but let's see the options */
		if (moveidx)
		{
			ereport(ERROR,
					(errcode(EINVAL),
					 errmsg("cannot specify --moveidx (-S) without --tablespace (-s)")));
		}
		return;
	}

	/* check if the tablespace exists */
	reconnect(ERROR);
	params[0] = tablespace;
	res = execute_elevel(
		"select spcname from pg_tablespace where spcname = $1",
		1, params, DEBUG2);

	if (PQresultStatus(res) == PGRES_TUPLES_OK)
	{
		if (PQntuples(res) == 0)
		{
			ereport(ERROR,
					(errcode(EINVAL),
					 errmsg("the tablespace \"%s\" doesn't exist", tablespace)));
		}
	}
	else
	{
		ereport(ERROR,
				(errcode(EINVAL),
				 errmsg("error checking the tablespace: %s",
						PQerrorMessage(connection))));
	}
	CLEARPGRES(res);
}

/*
 * Perform sanity checks before beginning work. Make sure pg_repack is
 * installed in the database, the user is a superuser, etc.
 */
static bool
preliminary_checks(char *errbuf, size_t errsize)
{
	bool			ret = false;
	PGresult	   *res = NULL;

	if (!is_superuser())
	{
		if (errbuf)
			snprintf(errbuf, errsize, "You must be a superuser to use %s",
					 PROGRAM_NAME);
		goto cleanup;
	}

	/* Query the extension version. Exit if no match */
	res = execute_elevel("select repack.version(), repack.version_sql()",
						 0, NULL, DEBUG2);
	if (PQresultStatus(res) == PGRES_TUPLES_OK)
	{
		const char *libver;
		char		buf[64];

		/* the string is something like "pg_repack 1.1.7" */
		snprintf(buf, sizeof(buf), "%s %s", PROGRAM_NAME, PROGRAM_VERSION);

		/* check the version of the C library */
		libver = getstr(res, 0, 0);
		if (0 != strcmp(buf, libver))
		{
			if (errbuf)
				snprintf(errbuf, errsize,
						 "program '%s' does not match database library '%s'",
						 buf, libver);
			goto cleanup;
		}

		/* check the version of the SQL extension */
		libver = getstr(res, 0, 1);
		if (0 != strcmp(buf, libver))
		{
			if (errbuf)
				snprintf(errbuf, errsize,
						 "extension '%s' required, found '%s';"
						 " please drop and re-create the extension",
						 buf, libver);
			goto cleanup;
		}
	}
	else
	{
		if (sqlstate_equals(res, SQLSTATE_INVALID_SCHEMA_NAME)
			|| sqlstate_equals(res, SQLSTATE_UNDEFINED_FUNCTION))
		{
			/* Schema repack does not exist, or version too old (version
			 * functions not found). Skip the database.
			 */
			if (errbuf)
				snprintf(errbuf, errsize,
						 "%s %s is not installed in the database",
						 PROGRAM_NAME, PROGRAM_VERSION);
		}
		else
		{
			/* Return the error message otherwise */
			if (errbuf)
				snprintf(errbuf, errsize, "%s", PQerrorMessage(connection));
		}
		goto cleanup;
	}
	CLEARPGRES(res);

	/* Disable statement timeout. */
	command("SET statement_timeout = 0", 0, NULL);

	/* Restrict search_path to system catalog. */
	command("SET search_path = pg_catalog, pg_temp, public", 0, NULL);

	/* To avoid annoying "create implicit ..." messages. */
	command("SET client_min_messages = warning", 0, NULL);

	ret = true;

cleanup:
	CLEARPGRES(res);
	return ret;
}

/*
 * Check the presence of the tables specified by --parent-table and --table;
 * if any is missing, format a user-friendly message.
 */
static bool
is_requested_relation_exists(char *errbuf, size_t errsize)
{
	bool			ret = false;
	PGresult	   *res = NULL;
	const char	  **params = NULL;
	int				iparam = 0;
	StringInfoData	sql;
	int				num_relations;
	SimpleStringListCell *cell;

	num_relations = simple_string_list_size(parent_table_list)
					+ simple_string_list_size(table_list);

	/* nothing was explicitly requested, so nothing to do here */
	if (num_relations == 0)
		return true;

	/* pre-9.6 servers have no suitable to_regclass(text) */
	if (PQserverVersion(connection) < 90600)
		return true;

	params = pgut_malloc(num_relations * sizeof(char *));

	initStringInfo(&sql);
	appendStringInfoString(&sql, "SELECT r FROM (VALUES ");

	for (cell = table_list.head; cell; cell = cell->next)
	{
		appendStringInfo(&sql, "($%d, 'r')", iparam + 1);
		params[iparam++] = cell->val;
		if (iparam < num_relations)
			appendStringInfoChar(&sql, ',');
	}
	for (cell = parent_table_list.head; cell; cell = cell->next)
	{
		appendStringInfo(&sql, "($%d, 'p')", iparam + 1);
		params[iparam++] = cell->val;
		if (iparam < num_relations)
			appendStringInfoChar(&sql, ',');
	}
	appendStringInfoString(&sql,
		") AS given_t(r,kind) WHERE"
		/* regular --table relation or inherited --parent-table */
		" NOT EXISTS("
		"  SELECT FROM repack.tables WHERE relid=to_regclass(given_t.r))"
		/* declarative partitioned --parent-table */
		" AND NOT EXISTS("
		"  SELECT FROM pg_catalog.pg_class c"
		"  WHERE c.oid=to_regclass(given_t.r)"
		"   AND c.relkind = given_t.kind AND given_t.kind = 'p')"
	);

	/* double check the parameters array is sane */
	if (iparam != num_relations)
	{
		if (errbuf)
			snprintf(errbuf, errsize,
					 "internal error: bad parameters count: %i instead of %i",
					 iparam, num_relations);
		goto cleanup;
	}

	res = execute_elevel(sql.data, iparam, params, DEBUG2);
	if (PQresultStatus(res) == PGRES_TUPLES_OK)
	{
		int num;

		num = PQntuples(res);
		if (num != 0)
		{
			int i;
			StringInfoData rel_names;
			initStringInfo(&rel_names);

			for (i = 0; i < num; i++)
			{
				appendStringInfo(&rel_names, "\"%s\"", getstr(res, i, 0));
				if ((i + 1) != num)
					appendStringInfoString(&rel_names, ", ");
			}

			if (errbuf)
			{
				if (num > 1)
					snprintf(errbuf, errsize,
							 "relations do not exist: %s", rel_names.data);
				else
					snprintf(errbuf, errsize,
							 "ERROR: relation %s does not exist", rel_names.data);
			}
			termStringInfo(&rel_names);
		}
		else
			ret = true;
	}
	else
	{
		if (errbuf)
			snprintf(errbuf, errsize, "%s", PQerrorMessage(connection));
	}
	CLEARPGRES(res);

cleanup:
	CLEARPGRES(res);
	termStringInfo(&sql);
	free(params);
	return ret;
}

/*
 * Call repack_one_database for each database.
 */
static void
repack_all_databases(const char *orderby)
{
	PGresult   *result;
	int			i;

	dbname = "postgres";
	reconnect(ERROR);

	if (!is_superuser())
		elog(ERROR, "You must be a superuser to use %s", PROGRAM_NAME);

	result = execute("SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;", 0, NULL);
	disconnect();

	for (i = 0; i < PQntuples(result); i++)
	{
		bool	ret;
		char	errbuf[256];

		dbname = PQgetvalue(result, i, 0);

		elog(INFO, "repacking database \"%s\"", dbname);
		if (!dryrun)
		{
			ret = repack_one_database(orderby, errbuf, sizeof(errbuf));
			if (!ret)
				elog(INFO, "database \"%s\" skipped: %s", dbname, errbuf);
		}
	}
	CLEARPGRES(result);
}

/* result is not copied */
static char *
getstr(PGresult *res, int row, int col)
{
	if (PQgetisnull(res, row, col))
		return NULL;
	else
		return PQgetvalue(res, row, col);
}

static Oid
getoid(PGresult *res, int row, int col)
{
	if (PQgetisnull(res, row, col))
		return InvalidOid;
	else
		return (Oid)strtoul(PQgetvalue(res, row, col), NULL, 10);
}

/*
 * Call repack_one_table for the target tables or each table in a database.
 */
static bool
repack_one_database(const char *orderby, char *errbuf, size_t errsize)
{
	bool			ret = false;
	PGresult	   *res = NULL;
	int				i;
	int				num;
	StringInfoData	sql;
	SimpleStringListCell *cell;
	const char	  **params = NULL;
	int				iparam = 0;
	size_t			num_parent_tables,
					num_tables,
					num_schemas,
					num_params,
					num_excluded_extensions;

	num_parent_tables = simple_string_list_size(parent_table_list);
	num_tables = simple_string_list_size(table_list);
	num_schemas = simple_string_list_size(schema_list);
	num_excluded_extensions = simple_string_list_size(exclude_extension_list);

	/* 1st param is the user-specified tablespace */
	num_params = num_excluded_extensions +
				 num_parent_tables +
				 num_tables +
				 num_schemas + 1;
	params = pgut_malloc(num_params * sizeof(char *));

	initStringInfo(&sql);

	reconnect(ERROR);

	/* No sense in setting up concurrent workers if --jobs=1 */
	if (jobs > 1)
		setup_workers(jobs);

	if (!preliminary_checks(errbuf, errsize))
		goto cleanup;

	if (!is_requested_relation_exists(errbuf, errsize))
		goto cleanup;

	/* acquire target tables */
	appendStringInfoString(&sql,
		"SELECT t.*,"
		" coalesce(v.tablespace, t.tablespace_orig) as tablespace_dest"
		" FROM repack.tables t, "
		" (VALUES (quote_ident($1::text))) as v (tablespace)"
		" WHERE ");

	params[iparam++] = tablespace;
	if (num_tables || num_parent_tables)
	{
		/* standalone tables */
		if (num_tables)
		{
			appendStringInfoString(&sql, "(");
			for (cell = table_list.head; cell; cell = cell->next)
			{
				/* Construct table name placeholders to be used by PQexecParams */
				appendStringInfo(&sql, "relid = $%d::regclass", iparam + 1);
				params[iparam++] = cell->val;
				if (cell->next)
					appendStringInfoString(&sql, " OR ");
			}
			appendStringInfoString(&sql, ")");
		}

		if (num_tables && num_parent_tables)
			appendStringInfoString(&sql, " OR ");
		/* parent tables + inherited children */
		if (num_parent_tables)
		{
			appendStringInfoString(&sql, "(");
			for (cell = parent_table_list.head; cell; cell = cell->next)
			{
				/* Construct table name placeholders to be used by PQexecParams */
				appendStringInfo(&sql,
					"relid = ANY(repack.get_table_and_inheritors($%d::regclass))",
					iparam + 1);
				params[iparam++] = cell->val;
				if (cell->next)
					appendStringInfoString(&sql, " OR ");
			}
			appendStringInfoString(&sql, ")");
		}
	}
	else if (num_schemas)
	{
		appendStringInfoString(&sql, "schemaname IN (");
		for (cell = schema_list.head; cell; cell = cell->next)
		{
			/* Construct schema name placeholders to be used by PQexecParams */
			appendStringInfo(&sql, "$%d", iparam + 1);
			params[iparam++] = cell->val;
			if (cell->next)
				appendStringInfoString(&sql, ", ");
		}
		appendStringInfoString(&sql, ")");
	}
	else
	{
		appendStringInfoString(&sql, "pkid IS NOT NULL");
	}

	/* Exclude tables which belong to extensions */
	if (exclude_extension_list.head)
	{
		appendStringInfoString(&sql, " AND t.relid NOT IN"
									 "  (SELECT d.objid::regclass"
									 "   FROM pg_depend d JOIN pg_extension e"
									 "   ON d.refobjid = e.oid"
									 "   WHERE d.classid = 'pg_class'::regclass AND (");

		/* List all excluded extensions */
		for (cell = exclude_extension_list.head; cell; cell = cell->next)
		{
			appendStringInfo(&sql, "e.extname = $%d", iparam + 1);
			params[iparam++] = cell->val;

			appendStringInfoString(&sql, cell->next ? " OR " : ")");
		}

		/* Close subquery */
		appendStringInfoString(&sql, ")");
	}

	/* Ensure the regression tests get a consistent ordering of tables */
	appendStringInfoString(&sql, " ORDER BY t.relname, t.schemaname");

	/* double check the parameters array is sane */
	if (iparam != num_params)
	{
		if (errbuf)
			snprintf(errbuf, errsize,
					 "internal error: bad parameters count: %i instead of %zi",
					 iparam, num_params);
		goto cleanup;
	}

	res = execute_elevel(sql.data, (int) num_params, params, DEBUG2);

	/* on error skip the database */
	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		/* Return the error message otherwise */
		if (errbuf)
			snprintf(errbuf, errsize, "%s", PQerrorMessage(connection));
		goto cleanup;
	}

	num = PQntuples(res);

	for (i = 0; i < num; i++)
	{
		repack_table	table;
		StringInfoData	copy_sql;
		const char	   *ckey;
		int				c = 0;

		table.target_name = getstr(res, i, c++);
		table.target_oid = getoid(res, i, c++);
		table.target_toast = getoid(res, i, c++);
		table.target_tidx = getoid(res, i, c++);
		c++;	/* Skip schemaname */
		table.pkid = getoid(res, i, c++);
		table.ckid = getoid(res, i, c++);

		if (table.pkid == 0)
		{
			ereport(WARNING,
					(errcode(E_PG_COMMAND),
					 errmsg("relation \"%s\" must have a primary key or not-null unique keys",
							table.target_name)));
			continue;
		}

		table.create_pktype = getstr(res, i, c++);
		table.create_log = getstr(res, i, c++);
		table.create_trigger = getstr(res, i, c++);
		table.enable_trigger = getstr(res, i, c++);
		table.create_table = getstr(res, i, c++);
		getstr(res, i, c++);	/* tablespace_orig is clobbered */
		table.copy_data = getstr(res, i, c++);
		table.alter_col_storage = getstr(res, i, c++);
		table.drop_columns = getstr(res, i, c++);
		table.delete_log = getstr(res, i, c++);
		table.lock_table = getstr(res, i, c++);
		ckey = getstr(res, i, c++);
		table.sql_peek = getstr(res, i, c++);
		table.sql_insert = getstr(res, i, c++);
		table.sql_delete = getstr(res, i, c++);
		table.sql_update = getstr(res, i, c++);
		table.sql_pop = getstr(res, i, c++);
		table.dest_tablespace = getstr(res, i, c++);
		/* Craft Copy SQL */
		initStringInfo(&copy_sql);
		appendStringInfoString(&copy_sql, table.copy_data);
		if (!orderby)
		{
			if (ckey != NULL)
			{
				/* CLUSTER mode */
				appendStringInfoString(&copy_sql, " ORDER BY ");
				appendStringInfoString(&copy_sql, ckey);
			}
			/* else, VACUUM FULL mode (non-clustered tables) */
		}
		else if (!orderby[0])
		{
			/* VACUUM FULL mode (for clustered tables too), do nothing */
		}
		else
		{
			/* User specified ORDER BY */
			appendStringInfoString(&copy_sql, " ORDER BY ");
			appendStringInfoString(&copy_sql, orderby);
		}
		table.copy_data = copy_sql.data;

		repack_one_table(&table, orderby);
	}
	ret = true;

cleanup:
	CLEARPGRES(res);
	disconnect();
	termStringInfo(&sql);
	free(params);
	return ret;
}

static int
apply_log(PGconn *conn, const repack_table *table, int count)
{
	int			result;
	PGresult   *res;
	const char *params[6];
	char		buffer[12];

	params[0] = table->sql_peek;
	params[1] = table->sql_insert;
	params[2] = table->sql_delete;
	params[3] = table->sql_update;
	params[4] = table->sql_pop;
	params[5] = utoa(count, buffer);

	res = pgut_execute(conn,
		"SELECT repack.repack_apply($1, $2, $3, $4, $5, $6)",
		6, params);
	result = atoi(PQgetvalue(res, 0, 0));
	CLEARPGRES(res);

	return result;
}
/*
 * Create indexes on temp table, possibly using multiple worker connections
 * concurrently if the user asked for --jobs=...
 */
static bool
rebuild_indexes(const repack_table *table)
{
	PGresult	   *res = NULL;
	int				num_indexes;
	int				i;
	int				num_active_workers;
	int				num_workers;
	repack_index   *index_jobs;
	bool			have_error = false;

	elog(DEBUG2, "---- create indexes ----");

	num_indexes = table->n_indexes;

	/* We might have more actual worker connections than we need,
	 * if the number of workers exceeds the number of indexes to be
	 * built. In that case, ignore the extra workers.
	 */
	num_workers = num_indexes > workers.num_workers ? workers.num_workers : num_indexes;
	num_active_workers = num_workers;

	elog(DEBUG2, "Have %d indexes and num_workers=%d", num_indexes, num_workers);

	index_jobs = table->indexes;

	for (i = 0; i < num_indexes; i++)
	{
		elog(DEBUG2, "set up index_jobs [%d]", i);
		elog(DEBUG2, "target_oid   : %u", index_jobs[i].target_oid);
		elog(DEBUG2, "create_index : %s", index_jobs[i].create_index);

		if (num_workers <= 1)
		{
			/* Use primary connection if we are not setting up parallel
			 * index building, or if we only have one worker.
			 */
			command(index_jobs[i].create_index, 0, NULL);

			/* This bookkeeping isn't actually important in this no-workers
			 * case, but just for clarity.
			 */
			index_jobs[i].status = FINISHED;
		}
		else if (i < num_workers)
		{
			/* Assign available worker to build an index. */
			index_jobs[i].status = INPROGRESS;
			index_jobs[i].worker_idx = i;
			elog(LOG, "Initial worker %d to build index: %s",
				 i, index_jobs[i].create_index);

			if (!(PQsendQuery(workers.conns[i], index_jobs[i].create_index)))
			{
				elog(WARNING, "Error sending async query: %s\n%s",
					 index_jobs[i].create_index,
					 PQerrorMessage(workers.conns[i]));
				have_error = true;
				goto cleanup;
			}
		}
		/* Else we have more indexes to be built than workers
		 * available. That's OK, we'll get to them later.
		 */
	}

	if (num_workers > 1)
	{
		int freed_worker = -1;
		int ret;

/* Prefer poll() over select(), following PostgreSQL custom. */
#ifdef HAVE_POLL
		struct pollfd *input_fds;

		input_fds = pgut_malloc(sizeof(struct pollfd) * num_workers);
		for (i = 0; i < num_workers; i++)
		{
			input_fds[i].fd = PQsocket(workers.conns[i]);
			input_fds[i].events = POLLIN | POLLERR;
			input_fds[i].revents = 0;
		}
#else
		fd_set			input_mask;
		struct timeval	timeout;
		/* select() needs the highest-numbered socket descriptor */
		int				max_fd;
#endif

		/* Now go through our index builds, and look for any which is
		 * reported complete. Reassign that worker to the next index to
		 * be built, if any.
		 */
		while (num_active_workers > 0)
		{
			elog(DEBUG2, "polling %d active workers", num_active_workers);

#ifdef HAVE_POLL
			ret = poll(input_fds, num_workers, POLL_TIMEOUT * 1000);
#else
			/* re-initialize timeout and input_mask before each
			 * invocation of select(). I think this isn't
			 * necessary on many Unixen, but just in case.
			 */
			timeout.tv_sec = POLL_TIMEOUT;
			timeout.tv_usec = 0;

			FD_ZERO(&input_mask);
			for (i = 0, max_fd = 0; i < num_workers; i++)
			{
				FD_SET(PQsocket(workers.conns[i]), &input_mask);
				if (PQsocket(workers.conns[i]) > max_fd)
					max_fd = PQsocket(workers.conns[i]);
			}

			ret = select(max_fd + 1, &input_mask, NULL, NULL, &timeout);
#endif
			/* XXX: the errno != EINTR check means we won't bail
			 * out on SIGINT. We should probably just remove this
			 * check, though it seems we also need to fix up
			 * the on_interrupt handling for workers' index
			 * builds (those PGconns don't seem to have c->cancel
			 * set, so we don't cancel the in-progress builds).
			 */
			if (ret < 0 && errno != EINTR)
				elog(ERROR, "poll() failed: %d, %d", ret, errno);

			elog(DEBUG2, "Poll returned: %d", ret);

			for (i = 0; i < num_indexes; i++)
			{
				if (index_jobs[i].status == INPROGRESS)
				{
					Assert(index_jobs[i].worker_idx >= 0);
					/* Must call PQconsumeInput before we can check PQisBusy */
					if (PQconsumeInput(workers.conns[index_jobs[i].worker_idx]) != 1)
					{
						elog(WARNING, "Error fetching async query status: %s",
							 PQerrorMessage(workers.conns[index_jobs[i].worker_idx]));
						have_error = true;
						goto cleanup;
					}
					if (!PQisBusy(workers.conns[index_jobs[i].worker_idx]))
					{
						elog(LOG, "Command finished in worker %d: %s",
							 index_jobs[i].worker_idx,
							 index_jobs[i].create_index);

						while ((res = PQgetResult(workers.conns[index_jobs[i].worker_idx])))
						{
							if (PQresultStatus(res) != PGRES_COMMAND_OK)
							{
								elog(WARNING, "Error with create index: %s",
									 PQerrorMessage(workers.conns[index_jobs[i].worker_idx]));
								have_error = true;
								goto cleanup;
							}
							CLEARPGRES(res);
						}

						/* We are only going to re-queue one worker, even
						 * though more than one index build might be finished.
						 * Any other jobs which may be finished will
						 * just have to wait for the next pass through the
						 * poll()/select() loop.
						 */
						freed_worker = index_jobs[i].worker_idx;
						index_jobs[i].status = FINISHED;
						num_active_workers--;
						break;
					}
				}
			}
			if (freed_worker > -1)
			{
				for (i = 0; i < num_indexes; i++)
				{
					if (index_jobs[i].status == UNPROCESSED)
					{
						index_jobs[i].status = INPROGRESS;
						index_jobs[i].worker_idx = freed_worker;
						elog(LOG, "Assigning worker %d to build index #%d: %s",
							 freed_worker, i, index_jobs[i].create_index);

						if (!(PQsendQuery(workers.conns[freed_worker],
										  index_jobs[i].create_index)))
						{
							elog(WARNING, "Error sending async query: %s\n%s",
								 index_jobs[i].create_index,
								 PQerrorMessage(workers.conns[freed_worker]));
							have_error = true;
							goto cleanup;
						}
						num_active_workers++;
						break;
					}
				}
				freed_worker = -1;
			}
		}
	}

cleanup:
	CLEARPGRES(res);
	return (!have_error);
}
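rebuild_indexes() multiplexes several index builds over plain libpq connections: one PQsendQuery() per worker, then poll() on the sockets, PQconsumeInput() when a socket is readable, and PQgetResult() once PQisBusy() reports the command finished. The following condensed, self-contained sketch shows that pattern for a fixed set of statements (the connection array and statement list are assumed to be set up by the caller; error handling is trimmed):

```c
#include <stdio.h>
#include <poll.h>
#include <libpq-fe.h>

/* Run sql[i] on conns[i] (i < n, n <= 16) concurrently; wait for all. */
static int run_concurrently(PGconn **conns, const char **sql, int n)
{
    struct pollfd fds[16];
    int pending = n;

    for (int i = 0; i < n; i++)
    {
        if (!PQsendQuery(conns[i], sql[i]))
            return -1;
        fds[i].fd = PQsocket(conns[i]);
        fds[i].events = POLLIN;
    }

    while (pending > 0)
    {
        if (poll(fds, n, -1) < 0)
            return -1;

        for (int i = 0; i < n; i++)
        {
            if (!(fds[i].revents & POLLIN))
                continue;
            if (!PQconsumeInput(conns[i]))   /* must precede PQisBusy() */
                return -1;
            if (PQisBusy(conns[i]))
                continue;                    /* result not complete yet */

            PGresult *res;
            while ((res = PQgetResult(conns[i])) != NULL)
            {
                if (PQresultStatus(res) != PGRES_COMMAND_OK)
                    fprintf(stderr, "%s", PQerrorMessage(conns[i]));
                PQclear(res);
            }
            fds[i].fd = -1;                  /* poll() ignores negative fds */
            pending--;
        }
    }
    return 0;
}
```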
/*
 * Re-organize one table.
 */
static void
repack_one_table(repack_table *table, const char *orderby)
{
	PGresult	   *res = NULL;
	const char	   *params[3];
	int				num;
	char		   *vxid = NULL;
	char			buffer[12];
	StringInfoData	sql;
	bool			ret = false;
	PGresult	   *indexres = NULL;
	const char	   *indexparams[2];
	char			indexbuffer[12];
	int				j;

	/* appname will be "pg_repack" in normal use on 9.0+, or
	 * "pg_regress" when run under `make installcheck`
	 */
	const char	   *appname = getenv("PGAPPNAME");

	/* Keep track of whether we have gotten through setup to install
	 * the repack_trigger, log table, etc. ourselves. We don't want to
	 * go through repack_cleanup() if we didn't actually set up the
	 * trigger ourselves, lest we be cleaning up another pg_repack's mess,
	 * or worse, interfering with a still-running pg_repack.
	 */
	bool			table_init = false;

	initStringInfo(&sql);

	elog(INFO, "repacking table \"%s\"", table->target_name);

	elog(DEBUG2, "---- repack_one_table ----");
	elog(DEBUG2, "target_name       : %s", table->target_name);
	elog(DEBUG2, "target_oid        : %u", table->target_oid);
	elog(DEBUG2, "target_toast      : %u", table->target_toast);
	elog(DEBUG2, "target_tidx       : %u", table->target_tidx);
	elog(DEBUG2, "pkid              : %u", table->pkid);
	elog(DEBUG2, "ckid              : %u", table->ckid);
	elog(DEBUG2, "create_pktype     : %s", table->create_pktype);
	elog(DEBUG2, "create_log        : %s", table->create_log);
	elog(DEBUG2, "create_trigger    : %s", table->create_trigger);
	elog(DEBUG2, "enable_trigger    : %s", table->enable_trigger);
	elog(DEBUG2, "create_table      : %s", table->create_table);
	elog(DEBUG2, "dest_tablespace   : %s", table->dest_tablespace);
	elog(DEBUG2, "copy_data         : %s", table->copy_data);
	elog(DEBUG2, "alter_col_storage : %s", table->alter_col_storage ?
		 table->alter_col_storage : "(skipped)");
	elog(DEBUG2, "drop_columns      : %s", table->drop_columns ?
		 table->drop_columns : "(skipped)");
	elog(DEBUG2, "delete_log        : %s", table->delete_log);
	elog(DEBUG2, "lock_table        : %s", table->lock_table);
	elog(DEBUG2, "sql_peek          : %s", table->sql_peek);
	elog(DEBUG2, "sql_insert        : %s", table->sql_insert);
	elog(DEBUG2, "sql_delete        : %s", table->sql_delete);
	elog(DEBUG2, "sql_update        : %s", table->sql_update);
	elog(DEBUG2, "sql_pop           : %s", table->sql_pop);

	if (dryrun)
		return;

	/* push repack_cleanup_callback() on stack to clean temporary objects */
	pgut_atexit_push(repack_cleanup_callback, table);

	/*
	 * 1. Setup advisory lock and trigger on main table.
	 */
	elog(DEBUG2, "---- setup ----");

	params[0] = utoa(table->target_oid, buffer);

	if (!advisory_lock(connection, buffer))
		goto cleanup;

	if (!(lock_exclusive(connection, buffer, table->lock_table, true)))
	{
		if (no_kill_backend)
			elog(INFO, "Skipping repack %s due to timeout", table->target_name);
		else
			elog(WARNING, "lock_exclusive() failed for %s", table->target_name);
		goto cleanup;
	}

	/*
	 * pg_get_indexdef requires an access share lock, so do those calls while
	 * we have an access exclusive lock anyway, so we know they won't block.
	 */
	indexparams[0] = utoa(table->target_oid, indexbuffer);
	indexparams[1] = moveidx ? tablespace : NULL;

	/* First, just display a warning message for any invalid indexes
	 * which may be on the table (mostly to match the behavior of 1.1.8),
	 * if --error-on-invalid-index is not set
	 */
	indexres = execute(
		"SELECT pg_get_indexdef(indexrelid)"
		" FROM pg_index WHERE indrelid = $1 AND NOT indisvalid",
		1, indexparams);

	for (j = 0; j < PQntuples(indexres); j++)
	{
		const char *indexdef;
		indexdef = getstr(indexres, j, 0);
		if (error_on_invalid_index)
		{
			elog(WARNING, "Invalid index: %s", indexdef);
			goto cleanup;
		}
		else
		{
			elog(WARNING, "skipping invalid index: %s", indexdef);
		}
	}

	indexres = execute(
		"SELECT indexrelid,"
		" repack.repack_indexdef(indexrelid, indrelid, $2, FALSE) "
		" FROM pg_index WHERE indrelid = $1 AND indisvalid",
		2, indexparams);

	table->n_indexes = PQntuples(indexres);
	table->indexes = pgut_malloc(table->n_indexes * sizeof(repack_index));

	for (j = 0; j < table->n_indexes; j++)
	{
		table->indexes[j].target_oid = getoid(indexres, j, 0);
		table->indexes[j].create_index = getstr(indexres, j, 1);
		table->indexes[j].status = UNPROCESSED;
		table->indexes[j].worker_idx = -1;	/* Unassigned */
	}

	for (j = 0; j < table->n_indexes; j++)
	{
		elog(DEBUG2, "index[%d].target_oid   : %u", j, table->indexes[j].target_oid);
		elog(DEBUG2, "index[%d].create_index : %s", j, table->indexes[j].create_index);
	}

	/*
	 * Check that repack_trigger does not conflict with an existing trigger.
	 * We could find this out later, but we check it in advance and go to
	 * cleanup if needed. In the AFTER trigger context, since the triggered
	 * tuple is not changed by other triggers, we don't care about the fire
	 * order.
	 */
	res = execute("SELECT repack.conflicted_triggers($1)", 1, params);
	if (PQntuples(res) > 0)
	{
		ereport(WARNING,
				(errcode(E_PG_COMMAND),
				 errmsg("the table \"%s\" already has a trigger called \"%s\"",
						table->target_name, "repack_trigger"),
				 errdetail(
					 "The trigger was probably installed during a previous"
					 " attempt to run pg_repack on the table which was"
					 " interrupted and for some reason failed to clean up"
					 " the temporary objects. Please drop the trigger or drop"
					 " and recreate the pg_repack extension altogether"
					 " to remove all the temporary objects left over.")));
		goto cleanup;
	}
	CLEARPGRES(res);

	command(table->create_pktype, 0, NULL);
	temp_obj_num++;
	command(table->create_log, 0, NULL);
	temp_obj_num++;
	command(table->create_trigger, 0, NULL);
	temp_obj_num++;
	command(table->enable_trigger, 0, NULL);
	printfStringInfo(&sql, "SELECT repack.disable_autovacuum('repack.log_%u')", table->target_oid);
	command(sql.data, 0, NULL);

	/* While we are still holding an AccessExclusive lock on the table, submit
	 * the request for an AccessShare lock asynchronously from conn2.
	 * We want to submit this query in conn2 while connection's
	 * transaction still holds its lock, so that no DDL may sneak in
	 * between the time that connection commits and conn2 gets its lock.
	 */
	pgut_command(conn2, "BEGIN ISOLATION LEVEL READ COMMITTED", 0, NULL);

	/* grab the backend PID of conn2; we'll need this when querying
	 * pg_locks momentarily.
	 */
	res = pgut_execute(conn2, "SELECT pg_backend_pid()", 0, NULL);
	buffer[0] = '\0';
	strncat(buffer, PQgetvalue(res, 0, 0), sizeof(buffer) - 1);
	CLEARPGRES(res);

	/*
	 * Not using lock_access_share() here since we know that
	 * it's not possible to obtain the ACCESS SHARE lock right now
	 * in conn2, since the primary connection holds ACCESS EXCLUSIVE.
	 */
	printfStringInfo(&sql, "LOCK TABLE %s IN ACCESS SHARE MODE", table->target_name);
	elog(DEBUG2, "LOCK TABLE %s IN ACCESS SHARE MODE", table->target_name);
	if (PQsetnonblocking(conn2, 1))
	{
		elog(WARNING, "Unable to set conn2 nonblocking.");
		goto cleanup;
	}
	if (!(PQsendQuery(conn2, sql.data)))
	{
		elog(WARNING, "Error sending async query: %s\n%s", sql.data,
			 PQerrorMessage(conn2));
		goto cleanup;
	}

	/* Now that we've submitted the LOCK TABLE request through conn2,
	 * look for and cancel any (potentially dangerous) DDL commands which
	 * might also be waiting on our table lock at this point --
	 * it's not safe to let them wait, because they may grab their
	 * AccessExclusive lock before conn2 gets its AccessShare lock,
	 * and perform unsafe DDL on the table.
	 *
	 * Normally, lock_access_share() would take care of this for us,
	 * but we're not able to use it here.
	 */
	if (!(kill_ddl(connection, table->target_oid, true)))
	{
		if (no_kill_backend)
			elog(INFO, "Skipping repack %s due to timeout.", table->target_name);
		else
			elog(WARNING, "kill_ddl() failed.");
		goto cleanup;
	}

	/* We're finished killing off any unsafe DDL. COMMIT in our main
	 * connection, so that conn2 may get its AccessShare lock.
	 */
	command("COMMIT", 0, NULL);

	/* The main connection has now committed its repack_trigger,
	 * log table, and temp. table. If any error occurs from this point
	 * on and we bail out, we should try to clean those up.
	 */
	table_init = true;

	/* Keep looping PQgetResult() calls until it returns NULL, indicating the
	 * command is done and we have obtained our lock.
	 */
	while ((res = PQgetResult(conn2)))
	{
		elog(DEBUG2, "Waiting on ACCESS SHARE lock...");
		if (PQresultStatus(res) != PGRES_COMMAND_OK)
		{
			elog(WARNING, "Error with LOCK TABLE: %s", PQerrorMessage(conn2));
			goto cleanup;
		}
		CLEARPGRES(res);
	}

	/* Turn conn2 back into blocking mode for further non-async use. */
	if (PQsetnonblocking(conn2, 0))
	{
		elog(WARNING, "Unable to set conn2 blocking.");
		goto cleanup;
	}

	/*
	 * 2. Copy tuples into temp table.
	 */
	elog(DEBUG2, "---- copy tuples ----");

	/* Must use SERIALIZABLE (or at least not READ COMMITTED) to avoid race
	 * condition between the create_table statement and rows subsequently
	 * being added to the log.
	 */
	command("BEGIN ISOLATION LEVEL SERIALIZABLE", 0, NULL);
	/* SET work_mem = maintenance_work_mem */
	command("SELECT set_config('work_mem', current_setting('maintenance_work_mem'), true)",
			0, NULL);
	if (orderby && !orderby[0])
		command("SET LOCAL synchronize_seqscans = off", 0, NULL);

	/* Fetch an array of Virtual IDs of all transactions active right now. */
	params[0] = buffer;			/* backend PID of conn2 */
	params[1] = PROGRAM_NAME;
	res = execute(SQL_XID_SNAPSHOT, 2, params);
	vxid = pgut_strdup(PQgetvalue(res, 0, 0));
	CLEARPGRES(res);

	/* Delete any existing entries in the log table now, since we have not
	 * yet run the CREATE TABLE ... AS SELECT, which will take in all existing
	 * rows from the target table; if we also included prior rows from the
	 * log we could wind up with duplicates.
	 */
	command(table->delete_log, 0, NULL);

	/* We need to be able to obtain an AccessShare lock on the target table
	 * for the create_table command to go through, so go ahead and obtain
	 * the lock explicitly.
	 *
	 * Since conn2 has been diligently holding its AccessShare lock, it
	 * is possible that another transaction has been waiting to acquire
	 * an AccessExclusive lock on the table (e.g. a concurrent ALTER TABLE
	 * or TRUNCATE which we must not allow). If there are any such
	 * transactions, lock_access_share() will kill them so that our
	 * CREATE TABLE ... AS SELECT does not deadlock waiting for an
	 * AccessShare lock.
	 */
	if (!(lock_access_share(connection, table->target_oid, table->target_name)))
		goto cleanup;

	/*
	 * Before copying data to the target table, we need to set the column
	 * storage type if its storage type has been changed from the type
	 * default.
	 */
	params[0] = utoa(table->target_oid, buffer);
	params[1] = table->dest_tablespace;
	command(table->create_table, 2, params);
	if (table->alter_col_storage)
		command(table->alter_col_storage, 0, NULL);
	command(table->copy_data, 0, NULL);
	temp_obj_num++;
	printfStringInfo(&sql, "SELECT repack.disable_autovacuum('repack.table_%u')", table->target_oid);
	if (table->drop_columns)
		command(table->drop_columns, 0, NULL);
	command(sql.data, 0, NULL);
	command("COMMIT", 0, NULL);

	/*
	 * 3. Create indexes on temp table.
	 */
	if (!rebuild_indexes(table))
		goto cleanup;

	/* don't clear indexres until after rebuild_indexes or bad things happen */
	CLEARPGRES(indexres);
	CLEARPGRES(res);

	/*
	 * 4. Apply log to temp table until no tuples are left in the log
	 * and all of the old transactions are finished.
	 */
	for (;;)
	{
		num = apply_log(connection, table, APPLY_COUNT);

		/* We'll keep applying tuples from the log table in batches
		 * of APPLY_COUNT, until applying a batch of tuples
		 * (via LIMIT) results in our having applied
		 * switch_threshold or fewer tuples. We don't want to
		 * get stuck repetitively applying some small number of tuples
		 * from the log table as inserts/updates/deletes may be
		 * constantly coming into the original table.
		 */
		if (num > switch_threshold)
			continue;			/* there might be still some tuples, repeat. */

		/* old transactions still alive ? */
		params[0] = vxid;
		res = execute(SQL_XID_ALIVE, 1, params);
		num = PQntuples(res);

		if (num > 0)
		{
			/* Wait for old transactions.
			 * Only display this message if we are NOT
			 * running under pg_regress, so as not to cause
			 * noise which would trip up pg_regress.
			 */
			if (!appname || strcmp(appname, "pg_regress") != 0)
			{
				elog(NOTICE, "Waiting for %d transactions to finish. First PID: %s",
					 num, PQgetvalue(res, 0, 0));
			}

			CLEARPGRES(res);
			sleep(1);
			continue;
		}
		else
		{
			/* All old transactions are finished;
			 * go to next step.
			 */
			CLEARPGRES(res);
			break;
		}
	}

	/*
	 * 5. Swap: will be done with conn2, since it already holds an
	 *    AccessShare lock.
	 */
	elog(DEBUG2, "---- swap ----");
	/* Bump our existing AccessShare lock to AccessExclusive */
	if (!(lock_exclusive(conn2, utoa(table->target_oid, buffer),
						 table->lock_table, false)))
	{
		elog(WARNING, "lock_exclusive() failed in conn2 for %s",
			 table->target_name);
		goto cleanup;
	}

	apply_log(conn2, table, 0);
	params[0] = utoa(table->target_oid, buffer);
	pgut_command(conn2, "SELECT repack.repack_swap($1)", 1, params);
	pgut_command(conn2, "COMMIT", 0, NULL);

	/*
	 * 6. Drop.
	 */
	elog(DEBUG2, "---- drop ----");

	command("BEGIN ISOLATION LEVEL READ COMMITTED", 0, NULL);
	if (!(lock_exclusive(connection, utoa(table->target_oid, buffer),
						 table->lock_table, false)))
	{
		elog(WARNING, "lock_exclusive() failed in connection for %s",
			 table->target_name);
		goto cleanup;
	}

	params[1] = utoa(temp_obj_num, indexbuffer);
	command("SELECT repack.repack_drop($1, $2)", 2, params);
	command("COMMIT", 0, NULL);
	temp_obj_num = 0;			/* reset temporary object counter after cleanup */

	/*
	 * 7. Analyze.
	 * Note that the cleanup hook has already been uninstalled here, because
	 * analyze is not an important operation; no cleanup is done even if it
	 * fails.
	 */
	if (analyze)
	{
		elog(DEBUG2, "---- analyze ----");

		command("BEGIN ISOLATION LEVEL READ COMMITTED", 0, NULL);
		printfStringInfo(&sql, "ANALYZE %s", table->target_name);
		command(sql.data, 0, NULL);
		command("COMMIT", 0, NULL);
	}

	/* Release advisory lock on table. */
	params[0] = REPACK_LOCK_PREFIX_STR;
	params[1] = utoa(table->target_oid, buffer);

	res = pgut_execute(connection,
		"SELECT pg_advisory_unlock($1, CAST(-2147483648 + $2::bigint AS integer))",
		2, params);
	ret = true;

cleanup:
	CLEARPGRES(res);
	termStringInfo(&sql);
	if (vxid)
		free(vxid);

	/* Rollback current transactions */
	pgut_rollback(connection);
	pgut_rollback(conn2);

	/* XXX: distinguish between fatal and non-fatal errors via the first
	 * arg to repack_cleanup().
	 */
	if ((!ret) && table_init)
		repack_cleanup(false, table);
}

/* Kill off any concurrent DDL (or any transaction attempting to take
 * an AccessExclusive lock) trying to run against our table, terminating
 * the backends if requested. Note, we're killing these queries off *before*
 * they are granted an AccessExclusive lock on our table.
 *
 * Returns true if no problems encountered, false otherwise.
 */
static bool
kill_ddl(PGconn *conn, Oid relid, bool terminate)
{
	bool			ret = true;
	PGresult	   *res;
	StringInfoData	sql;
	int				n_tuples;

	initStringInfo(&sql);

	/* Check the number of backends competing for the AccessExclusiveLock */
	printfStringInfo(&sql, COUNT_COMPETING_LOCKS, relid);
	res = pgut_execute(conn, sql.data, 0, NULL);
	n_tuples = PQntuples(res);
	if (n_tuples != 0)
	{
		/* A competing backend exists, but if we do not want to cancel or
		 * terminate any backend, do nothing.
		 */
		if (no_kill_backend)
		{
			elog(WARNING, "%d unsafe queries remain but will not be canceled; skipping repack",
				 n_tuples);
			ret = false;
		}
		else
		{
			resetStringInfo(&sql);
			printfStringInfo(&sql, CANCEL_COMPETING_LOCKS, relid);
			res = pgut_execute(conn, sql.data, 0, NULL);
			if (PQresultStatus(res) != PGRES_TUPLES_OK)
			{
				elog(WARNING, "Error canceling unsafe queries: %s",
					 PQerrorMessage(conn));
				ret = false;
			}
			else if (PQntuples(res) > 0 && terminate && PQserverVersion(conn) >= 80400)
			{
				elog(WARNING,
					 "Canceled %d unsafe queries. Terminating any remaining PIDs.",
					 PQntuples(res));

				CLEARPGRES(res);
				printfStringInfo(&sql, KILL_COMPETING_LOCKS, relid);
				res = pgut_execute(conn, sql.data, 0, NULL);
				if (PQresultStatus(res) != PGRES_TUPLES_OK)
				{
					elog(WARNING, "Error killing unsafe queries: %s",
						 PQerrorMessage(conn));
					ret = false;
				}
			}
			else if (PQntuples(res) > 0)
				elog(NOTICE, "Canceled %d unsafe queries", PQntuples(res));
		}
	}
	else
		elog(DEBUG2, "No competing DDL to cancel.");

	CLEARPGRES(res);
	termStringInfo(&sql);

	return ret;
}
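The two helpers that follow, lock_access_share() and lock_exclusive(), share one idiom: bound every LOCK TABLE attempt with a short, growing lock_timeout, and on SQLSTATE 55P03 (lock_not_available) roll back and retry instead of queueing indefinitely behind other lockers. A condensed standalone sketch of just that retry loop (the table name and retry cap are illustrative, not pg_repack's own values):

```c
#include <stdio.h>
#include <string.h>
#include <libpq-fe.h>

/* Try to lock 'table' in ACCESS SHARE mode, retrying with a growing
 * lock_timeout; returns 1 on success, 0 on giving up or hard error. */
static int lock_with_backoff(PGconn *conn, const char *table)
{
    char sql[256];

    for (int attempt = 1; attempt <= 20; attempt++)
    {
        int wait_msec = attempt * 100 > 1000 ? 1000 : attempt * 100;

        PQclear(PQexec(conn, "BEGIN"));
        snprintf(sql, sizeof(sql), "SET LOCAL lock_timeout = %d", wait_msec);
        PQclear(PQexec(conn, sql));

        snprintf(sql, sizeof(sql), "LOCK TABLE %s IN ACCESS SHARE MODE", table);
        PGresult *res = PQexec(conn, sql);

        if (PQresultStatus(res) == PGRES_COMMAND_OK)
        {
            PQclear(res);
            return 1;               /* caller owns the open transaction */
        }

        const char *state = PQresultErrorField(res, PG_DIAG_SQLSTATE);
        int retry = (state && strcmp(state, "55P03") == 0);
        PQclear(res);
        PQclear(PQexec(conn, "ROLLBACK"));
        if (!retry)
            return 0;               /* a real error, not a lock timeout */
    }
    return 0;
}
```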
*/ if (duration > (wait_timeout * 2)) ret = kill_ddl(conn, relid, true); else ret = kill_ddl(conn, relid, false); if (!ret) break; /* wait for a while to lock the table. */ wait_msec = Min(1000, i * 100); printfStringInfo(&sql, "SET LOCAL lock_timeout = %d", wait_msec); pgut_command(conn, sql.data, 0, NULL); printfStringInfo(&sql, "LOCK TABLE %s IN ACCESS SHARE MODE", target_name); res = pgut_execute_elevel(conn, sql.data, 0, NULL, DEBUG2); if (PQresultStatus(res) == PGRES_COMMAND_OK) { CLEARPGRES(res); break; } else if (sqlstate_equals(res, SQLSTATE_LOCK_NOT_AVAILABLE)) { /* retry if lock conflicted */ CLEARPGRES(res); pgut_rollback(conn); continue; } else { /* exit otherwise */ elog(WARNING, "%s", PQerrorMessage(connection)); CLEARPGRES(res); ret = false; break; } } termStringInfo(&sql); pgut_command(conn, "RESET lock_timeout", 0, NULL); return ret; } /* Obtain an advisory lock on the table's OID, to make sure no other * pg_repack is working on the table. This is not so much a concern with * full-table repacks, but mainly so that index-only repacks don't interfere * with each other or a full-table repack. */ static bool advisory_lock(PGconn *conn, const char *relid) { PGresult *res = NULL; bool ret = false; const char *params[2]; params[0] = REPACK_LOCK_PREFIX_STR; params[1] = relid; /* For the 2-argument form of pg_try_advisory_lock, we need to * pass in two signed 4-byte integers. But a table OID is an * *unsigned* 4-byte integer. Add -2147483648 to that OID to make * it fit reliably into signed int space. */ res = pgut_execute(conn, "SELECT pg_try_advisory_lock($1, CAST(-2147483648 + $2::bigint AS integer))", 2, params); if (PQresultStatus(res) != PGRES_TUPLES_OK) { elog(ERROR, "%s", PQerrorMessage(connection)); } else if (strcmp(getstr(res, 0, 0), "t") != 0) { elog(ERROR, "Another pg_repack command may be running on the table. Please try again later."); } else { ret = true; } CLEARPGRES(res); return ret; } /* * Try to acquire an ACCESS EXCLUSIVE table lock, avoiding deadlocks and long * waits by killing off other sessions. * Arguments: * * conn: connection to use * relid: OID of relation * lock_query: LOCK TABLE ... IN ACCESS EXCLUSIVE query to be executed * start_xact: whether we will issue a BEGIN ourselves. If not, we will * use a SAVEPOINT and ROLLBACK TO SAVEPOINT if our query * times out, to avoid leaving the transaction in error state.
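*
* Illustrative sketch (editor's note) of the two transaction-reset
* strategies the retry loop below uses when the lock attempt times out:
*
*   start_xact = true:                start_xact = false:
*     BEGIN ISOLATION LEVEL             SAVEPOINT repack_sp1;
*       READ COMMITTED;                 SET LOCAL lock_timeout = ...;
*     SET LOCAL lock_timeout = ...;     LOCK TABLE ...;
*     LOCK TABLE ...;                   -- on lock timeout:
*     -- on lock timeout:               ROLLBACK TO SAVEPOINT repack_sp1;
*     ROLLBACK;
*
* The SAVEPOINT form keeps the caller's surrounding transaction usable
* instead of leaving it in an aborted state.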
*/ static bool lock_exclusive(PGconn *conn, const char *relid, const char *lock_query, bool start_xact) { time_t start = time(NULL); int i; bool ret = true; for (i = 1; ; i++) { time_t duration; char sql[1024]; PGresult *res; int wait_msec; if (start_xact) pgut_command(conn, "BEGIN ISOLATION LEVEL READ COMMITTED", 0, NULL); else pgut_command(conn, "SAVEPOINT repack_sp1", 0, NULL); duration = time(NULL) - start; if (duration > wait_timeout) { if (no_kill_backend) { elog(WARNING, "timed out, will not cancel conflicting backends"); ret = false; /* Before exiting the loop, reset the transaction */ if (start_xact) pgut_rollback(conn); else pgut_command(conn, "ROLLBACK TO SAVEPOINT repack_sp1", 0, NULL); break; } else { const char *cancel_query; if (PQserverVersion(conn) >= 80400 && duration > wait_timeout * 2) { elog(WARNING, "terminating conflicting backends"); cancel_query = "SELECT pg_terminate_backend(pid) FROM pg_locks" " WHERE locktype = 'relation'" " AND relation = $1 AND pid <> pg_backend_pid()"; } else { elog(WARNING, "canceling conflicting backends"); cancel_query = "SELECT pg_cancel_backend(pid) FROM pg_locks" " WHERE locktype = 'relation'" " AND relation = $1 AND pid <> pg_backend_pid()"; } pgut_command(conn, cancel_query, 1, &relid); } } /* wait for a while to lock the table. */ wait_msec = Min(1000, i * 100); snprintf(sql, lengthof(sql), "SET LOCAL lock_timeout = %d", wait_msec); pgut_command(conn, sql, 0, NULL); res = pgut_execute_elevel(conn, lock_query, 0, NULL, DEBUG2); if (PQresultStatus(res) == PGRES_COMMAND_OK) { CLEARPGRES(res); break; } else if (sqlstate_equals(res, SQLSTATE_LOCK_NOT_AVAILABLE)) { /* retry if lock conflicted */ CLEARPGRES(res); if (start_xact) pgut_rollback(conn); else pgut_command(conn, "ROLLBACK TO SAVEPOINT repack_sp1", 0, NULL); continue; } else { /* exit otherwise; report the error as lock_access_share() does */ elog(WARNING, "%s", PQerrorMessage(connection)); CLEARPGRES(res); ret = false; break; } } pgut_command(conn, "RESET lock_timeout", 0, NULL); return ret; } /* This function calls repack_drop() to clean up temporary objects on error * during creation of temporary objects. */ void repack_cleanup_callback(bool fatal, void *userdata) { repack_table *table = (repack_table *) userdata; Oid target_table = table->target_oid; const char *params[2]; char buffer[12]; char num_buff[12]; if(fatal) { params[0] = utoa(target_table, buffer); params[1] = utoa(temp_obj_num, num_buff); /* testing PQstatus() of connection and conn2, as we do * in repack_cleanup(), doesn't seem to work here, * so just use an unconditional reconnect(). */ reconnect(ERROR); command("BEGIN ISOLATION LEVEL READ COMMITTED", 0, NULL); if (!(lock_exclusive(connection, params[0], table->lock_table, false))) { pgut_rollback(connection); elog(ERROR, "lock_exclusive() failed in connection for %s during cleanup callback", table->target_name); } command("SELECT repack.repack_drop($1, $2)", 2, params); command("COMMIT", 0, NULL); temp_obj_num = 0; /* reset temporary object counter after cleanup */ } } /* * The userdata points to the table being re-organized. We need to clean up * temp objects before the program exits. */ static void repack_cleanup(bool fatal, const repack_table *table) { if (fatal) { fprintf(stderr, "!!!FATAL ERROR!!! Please refer to the manual.\n\n"); } else { char buffer[12]; char num_buff[12]; const char *params[2]; /* Try reconnection if not available.
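*
* For illustration (editor's sketch; the OID is hypothetical): with
* table->target_oid = 16385 and temp_obj_num = 4, the cleanup below
* effectively runs
*
*   BEGIN ISOLATION LEVEL READ COMMITTED;
*   LOCK TABLE ... IN ACCESS EXCLUSIVE MODE;  -- via lock_exclusive()
*   SELECT repack.repack_drop('16385', '4');
*   COMMIT;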
*/ if (PQstatus(connection) != CONNECTION_OK || PQstatus(conn2) != CONNECTION_OK) reconnect(ERROR); /* do cleanup */ params[0] = utoa(table->target_oid, buffer); params[1] = utoa(temp_obj_num, num_buff); command("BEGIN ISOLATION LEVEL READ COMMITTED", 0, NULL); if (!(lock_exclusive(connection, params[0], table->lock_table, false))) { pgut_rollback(connection); elog(ERROR, "lock_exclusive() failed in connection for %s during cleanup", table->target_name); } command("SELECT repack.repack_drop($1, $2)", 2, params); command("COMMIT", 0, NULL); temp_obj_num = 0; /* reset temporary object counter after cleanup */ } } /* * Indexes of a table are repacked. */ static bool repack_table_indexes(PGresult *index_details) { bool ret = false; PGresult *res = NULL, *res2 = NULL; StringInfoData sql, sql_drop; char buffer[2][12]; const char *create_idx, *schema_name, *table_name, *params[3]; Oid table, index; int i, num, num_repacked = 0; bool *repacked_indexes; initStringInfo(&sql); num = PQntuples(index_details); table = getoid(index_details, 0, 3); params[1] = utoa(table, buffer[1]); params[2] = tablespace; schema_name = getstr(index_details, 0, 5); /* table_name is schema-qualified */ table_name = getstr(index_details, 0, 4); /* Keep track of which of the table's indexes we have successfully * repacked, so that we may DROP only those indexes. */ if (!(repacked_indexes = calloc(num, sizeof(bool)))) ereport(ERROR, (errcode(ENOMEM), errmsg("Unable to calloc repacked_indexes"))); /* Check if any concurrent pg_repack command is being run on the same * table. */ if (!advisory_lock(connection, params[1])) ereport(ERROR, (errcode(EINVAL), errmsg("Unable to obtain advisory lock on \"%s\"", table_name))); for (i = 0; i < num; i++) { char *isvalid = getstr(index_details, i, 2); char *idx_name = getstr(index_details, i, 0); if (isvalid[0] == 't') { index = getoid(index_details, i, 1); resetStringInfo(&sql); appendStringInfo(&sql, "SELECT pgc.relname, nsp.nspname " "FROM pg_class pgc INNER JOIN pg_namespace nsp " "ON nsp.oid = pgc.relnamespace " "WHERE pgc.relname = 'index_%u' " "AND nsp.nspname = $1", index); params[0] = schema_name; elog(INFO, "repacking index \"%s\"", idx_name); res = execute(sql.data, 1, params); if (PQresultStatus(res) != PGRES_TUPLES_OK) { elog(WARNING, "%s", PQerrorMessage(connection)); continue; } if (PQntuples(res) > 0) { ereport(WARNING, (errcode(E_PG_COMMAND), errmsg("Cannot create index \"%s\".\"index_%u\", " "already exists", schema_name, index), errdetail("An invalid index may have been left behind" " by a previous pg_repack on the table" " which was interrupted. 
Please use DROP " "INDEX \"%s\".\"index_%u\"" " to remove this index and try again.", schema_name, index))); continue; } if (dryrun) continue; params[0] = utoa(index, buffer[0]); res = execute("SELECT repack.repack_indexdef($1, $2, $3, true)", 3, params); if (PQntuples(res) < 1) { elog(WARNING, "unable to generate SQL to CREATE work index for %s", getstr(index_details, i, 0)); continue; } create_idx = getstr(res, 0, 0); /* Use a separate PGresult to avoid stomping on create_idx */ res2 = execute_elevel(create_idx, 0, NULL, DEBUG2); if (PQresultStatus(res2) != PGRES_COMMAND_OK) { ereport(WARNING, (errcode(E_PG_COMMAND), errmsg("Error creating index \"%s\".\"index_%u\": %s", schema_name, index, PQerrorMessage(connection) ) )); } else { repacked_indexes[i] = true; num_repacked++; } CLEARPGRES(res); CLEARPGRES(res2); } else elog(WARNING, "skipping invalid index: %s.%s", schema_name, getstr(index_details, i, 0)); } if (dryrun) { ret = true; goto done; } /* If we did not successfully repack any indexes, e.g. because of some * error affecting every CREATE INDEX attempt, don't waste time with * the ACCESS EXCLUSIVE lock on the table, and return false. * N.B. none of the DROP INDEXes should be performed since * repacked_indexes[] flags should all be false. */ if (!num_repacked) { elog(WARNING, "Skipping index swapping for \"%s\", since no new indexes built", table_name); goto drop_idx; } /* take an exclusive lock on table before calling repack_index_swap() */ resetStringInfo(&sql); appendStringInfo(&sql, "LOCK TABLE %s IN ACCESS EXCLUSIVE MODE", table_name); if (!(lock_exclusive(connection, params[1], sql.data, true))) { elog(WARNING, "lock_exclusive() failed in connection for %s", table_name); goto drop_idx; } for (i = 0; i < num; i++) { index = getoid(index_details, i, 1); if (repacked_indexes[i]) { params[0] = utoa(index, buffer[0]); pgut_command(connection, "SELECT repack.repack_index_swap($1)", 1, params); } else elog(INFO, "Skipping index swap for index_%u", index); } pgut_command(connection, "COMMIT", 0, NULL); ret = true; drop_idx: resetStringInfo(&sql); initStringInfo(&sql_drop); appendStringInfoString(&sql, "DROP INDEX CONCURRENTLY "); appendStringInfo(&sql, "\"%s\".", schema_name); for (i = 0; i < num; i++) { index = getoid(index_details, i, 1); if (repacked_indexes[i]) { resetStringInfo(&sql_drop); /* reuse the buffer on each iteration */ appendStringInfo(&sql_drop, "%s\"index_%u\"", sql.data, index); command(sql_drop.data, 0, NULL); } else elog(INFO, "Skipping drop of index_%u", index); } termStringInfo(&sql_drop); termStringInfo(&sql); done: CLEARPGRES(res); free(repacked_indexes); return ret; } /* * Call repack_table_indexes for each of the tables */ static bool repack_all_indexes(char *errbuf, size_t errsize) { bool ret = false; PGresult *res = NULL; StringInfoData sql; SimpleStringListCell *cell = NULL; const char *params[1]; initStringInfo(&sql); reconnect(ERROR); assert(r_index.head || table_list.head || parent_table_list.head); if (!preliminary_checks(errbuf, errsize)) goto cleanup; if (!is_requested_relation_exists(errbuf, errsize)) goto cleanup; if (r_index.head) { appendStringInfoString(&sql, "SELECT repack.oid2text(i.oid), idx.indexrelid, idx.indisvalid, idx.indrelid, repack.oid2text(idx.indrelid), n.nspname" " FROM pg_index idx JOIN pg_class i ON i.oid = idx.indexrelid" " JOIN pg_namespace n ON n.oid = i.relnamespace" " WHERE idx.indexrelid = $1::regclass ORDER BY indisvalid DESC, i.relname, n.nspname"); cell = r_index.head; } else if (table_list.head || parent_table_list.head) { appendStringInfoString(&sql,
"SELECT repack.oid2text(i.oid), idx.indexrelid, idx.indisvalid, idx.indrelid, $1::text, n.nspname" " FROM pg_index idx JOIN pg_class i ON i.oid = idx.indexrelid" " JOIN pg_namespace n ON n.oid = i.relnamespace" " WHERE idx.indrelid = $1::regclass ORDER BY indisvalid DESC, i.relname, n.nspname"); for (cell = parent_table_list.head; cell; cell = cell->next) { int nchildren, i; params[0] = cell->val; /* find children of this parent table */ res = execute_elevel("SELECT quote_ident(n.nspname) || '.' || quote_ident(c.relname)" " FROM pg_class c JOIN pg_namespace n on n.oid = c.relnamespace" " WHERE c.oid = ANY (repack.get_table_and_inheritors($1::regclass))" " ORDER BY n.nspname, c.relname", 1, params, DEBUG2); if (PQresultStatus(res) != PGRES_TUPLES_OK) { elog(WARNING, "%s", PQerrorMessage(connection)); continue; } nchildren = PQntuples(res); if (nchildren == 0) { elog(WARNING, "relation \"%s\" does not exist", cell->val); continue; } /* append new tables to 'table_list' */ for (i = 0; i < nchildren; i++) simple_string_list_append(&table_list, getstr(res, i, 0)); } CLEARPGRES(res); cell = table_list.head; } for (; cell; cell = cell->next) { params[0] = cell->val; res = execute_elevel(sql.data, 1, params, DEBUG2); if (PQresultStatus(res) != PGRES_TUPLES_OK) { elog(WARNING, "%s", PQerrorMessage(connection)); continue; } if (PQntuples(res) == 0) { if(table_list.head) elog(WARNING, "\"%s\" does not have any indexes", cell->val); else if(r_index.head) elog(WARNING, "\"%s\" is not a valid index", cell->val); continue; } if(table_list.head) elog(INFO, "repacking indexes of \"%s\"", cell->val); if (!repack_table_indexes(res)) elog(WARNING, "repack failed for \"%s\"", cell->val); CLEARPGRES(res); } ret = true; cleanup: disconnect(); termStringInfo(&sql); return ret; } void pgut_help(bool details) { printf("%s re-organizes a PostgreSQL database.\n\n", PROGRAM_NAME); printf("Usage:\n"); printf(" %s [OPTION]... 
[DBNAME]\n", PROGRAM_NAME); if (!details) return; printf("Options:\n"); printf(" -a, --all repack all databases\n"); printf(" -t, --table=TABLE repack specific table only\n"); printf(" -I, --parent-table=TABLE repack specific parent table and its inheritors\n"); printf(" -c, --schema=SCHEMA repack tables in specific schema only\n"); printf(" -s, --tablespace=TBLSPC move repacked tables to a new tablespace\n"); printf(" -S, --moveidx move repacked indexes to TBLSPC too\n"); printf(" -o, --order-by=COLUMNS order by columns instead of cluster keys\n"); printf(" -n, --no-order do vacuum full instead of cluster\n"); printf(" -N, --dry-run print what would have been repacked\n"); printf(" -j, --jobs=NUM Use this many parallel jobs for each table\n"); printf(" -i, --index=INDEX move only the specified index\n"); printf(" -x, --only-indexes move only indexes of the specified table\n"); printf(" -T, --wait-timeout=SECS timeout to cancel other backends on conflict\n"); printf(" -D, --no-kill-backend don't kill other backends when timed out\n"); printf(" -Z, --no-analyze don't analyze at end\n"); printf(" -k, --no-superuser-check skip superuser checks in client\n"); printf(" -C, --exclude-extension don't repack tables which belong to specific extension\n"); printf(" --error-on-invalid-index don't repack when invalid indexes are found\n"); printf(" --switch-threshold switch tables when that many tuples are left to catch up\n"); } pg_repack-ver_1.5.0/bin/pgut/000077500000000000000000000000001452746007700160675ustar00rootroot00000000000000pg_repack-ver_1.5.0/bin/pgut/pgut-fe.c000066400000000000000000000407321452746007700176100ustar00rootroot00000000000000/*------------------------------------------------------------------------- * pgut-fe.c * * Portions Copyright (c) 2008-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION * Portions Copyright (c) 2011, Itagaki Takahiro * Portions Copyright (c) 2012-2020, The Reorg Development Team *------------------------------------------------------------------------- */ #define FRONTEND #include "pgut-fe.h" #include "common/username.h" #ifdef HAVE_GETOPT_H #include #else #include #endif const char *dbname = NULL; char *host = NULL; char *port = NULL; char *username = NULL; char *password = NULL; YesNo prompt_password = DEFAULT; PGconn *connection = NULL; PGconn *conn2 = NULL; worker_conns workers = { .num_workers = 0, .conns = NULL }; static bool parse_pair(const char buffer[], char key[], char value[]); /* * Set up worker conns which will be used for concurrent index rebuilds. * 'num_workers' is the desired number of worker connections, i.e. from * --jobs flag. Due to max_connections we might not actually be able to * set up that many workers, but don't treat that as a fatal error.
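*
* Minimal usage sketch (editor's illustration; create_index_sql is a
* hypothetical array of CREATE INDEX statements):
*
*   setup_workers(4);                          -- e.g. from --jobs=4
*   for (i = 0; i < workers.num_workers; i++)
*       pgut_send(workers.conns[i], create_index_sql[i], 0, NULL);
*   ...
*   disconnect_workers();
*
* Note that workers.num_workers may end up smaller than requested, so
* callers must loop over the recorded count, not the requested one.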
*/ void setup_workers(int num_workers) { StringInfoData buf; int i; PGconn *conn; elog(DEBUG2, "In setup_workers(), target num_workers = %d", num_workers); if (num_workers > 1 && num_workers > workers.num_workers) { initStringInfo(&buf); if (dbname && dbname[0]) appendStringInfo(&buf, "dbname=%s ", dbname); if (host && host[0]) appendStringInfo(&buf, "host=%s ", host); if (port && port[0]) appendStringInfo(&buf, "port=%s ", port); if (username && username[0]) appendStringInfo(&buf, "user=%s ", username); if (password && password[0]) appendStringInfo(&buf, "password=%s ", password); if (workers.conns == NULL) { elog(NOTICE, "Setting up workers.conns"); workers.conns = (PGconn **) pgut_malloc(sizeof(PGconn *) * num_workers); } else { elog(ERROR, "TODO: Implement pool resizing."); } for (i = 0; i < num_workers; i++) { /* Don't prompt for password again; we should have gotten * it already from reconnect(). */ elog(DEBUG2, "Setting up worker conn %d", i); /* Don't confuse pgut_connections by using pgut_connect() * * XXX: could use PQconnectStart() and PQconnectPoll() to * open these connections in non-blocking manner. */ conn = PQconnectdb(buf.data); if (PQstatus(conn) == CONNECTION_OK) { workers.conns[i] = conn; } else { elog(WARNING, "Unable to set up worker conn #%d: %s", i, PQerrorMessage(conn)); break; } /* Hardcode a search path to avoid injections into public or pg_temp */ pgut_command(conn, "SET search_path TO pg_catalog, pg_temp, public", 0, NULL); /* Make sure each worker connection can work in non-blocking * mode. */ if (PQsetnonblocking(workers.conns[i], 1)) { elog(ERROR, "Unable to set worker connection %d " "non-blocking.", i); } } /* In case we bailed out of setting up all workers, record * how many successful worker conns we actually have. */ workers.num_workers = i; termStringInfo(&buf); } } /* Disconnect all our worker conns. */ void disconnect_workers(void) { int i; if (!(workers.num_workers)) elog(DEBUG2, "No workers to disconnect."); else { for (i = 0; i < workers.num_workers; i++) { if (workers.conns[i]) { elog(DEBUG2, "Disconnecting worker %d.", i); PQfinish(workers.conns[i]); workers.conns[i] = NULL; } else { elog(NOTICE, "Worker %d already disconnected?", i); } } workers.num_workers = 0; free(workers.conns); workers.conns = NULL; } } /* * the result is also available with the global variable 'connection'. 
*/ void reconnect(int elevel) { StringInfoData buf; char *new_password; disconnect(); initStringInfo(&buf); if (dbname && dbname[0]) appendStringInfo(&buf, "dbname=%s ", dbname); if (host && host[0]) appendStringInfo(&buf, "host=%s ", host); if (port && port[0]) appendStringInfo(&buf, "port=%s ", port); if (username && username[0]) appendStringInfo(&buf, "user=%s ", username); if (password && password[0]) appendStringInfo(&buf, "password=%s ", password); connection = pgut_connect(buf.data, prompt_password, elevel); conn2 = pgut_connect(buf.data, prompt_password, elevel); /* update password */ if (connection) { new_password = PQpass(connection); if (new_password && new_password[0] && (password == NULL || strcmp(new_password, password) != 0)) { free(password); password = pgut_strdup(new_password); } } termStringInfo(&buf); } void disconnect(void) { if (connection) { pgut_disconnect(connection); connection = NULL; } if (conn2) { pgut_disconnect(conn2); conn2 = NULL; } disconnect_workers(); } static void option_from_env(pgut_option options[]) { size_t i; for (i = 0; options && options[i].type; i++) { pgut_option *opt = &options[i]; char name[256]; size_t j; const char *s; const char *value; if (opt->source > SOURCE_ENV || opt->allowed == SOURCE_DEFAULT || opt->allowed > SOURCE_ENV) continue; for (s = opt->lname, j = 0; *s && j < lengthof(name) - 1; s++, j++) { if (strchr("-_ ", *s)) name[j] = '_'; /* - to _ */ else name[j] = toupper(*s); } name[j] = '\0'; if ((value = getenv(name)) != NULL) pgut_setopt(opt, value, SOURCE_ENV); } } /* compare two strings, ignoring case and ignoring -_ */ bool pgut_keyeq(const char *lhs, const char *rhs) { for (; *lhs && *rhs; lhs++, rhs++) { if (strchr("-_ ", *lhs)) { if (!strchr("-_ ", *rhs)) return false; } else if (ToLower(*lhs) != ToLower(*rhs)) return false; } return *lhs == '\0' && *rhs == '\0'; } void pgut_setopt(pgut_option *opt, const char *optarg, pgut_optsrc src) { const char *message; if (opt == NULL) { fprintf(stderr, "Try \"%s --help\" for more information.\n", PROGRAM_NAME); exit(EINVAL); } if (opt->source > src) { /* a higher-priority value has already been set. */ return; } else if (src >= SOURCE_CMDLINE && opt->source >= src && opt->type != 'l') { /* duplicated option in command line -- don't worry if the option * type is 'l' i.e. SimpleStringList, since we are allowed to have * multiples of these. */ message = "specified only once"; } else { /* can be overwritten if non-command line source */ opt->source = src; switch (opt->type) { case 'b': case 'B': if (optarg == NULL) { *((bool *) opt->var) = (opt->type == 'b'); return; } else if (parse_bool(optarg, (bool *) opt->var)) { return; } message = "a boolean"; break; case 'f': ((pgut_optfn) opt->var)(opt, optarg); return; case 'i': if (parse_int32(optarg, opt->var)) return; message = "a 32bit signed integer"; break; case 'l': message = "a List"; simple_string_list_append(opt->var, optarg); return; case 'u': if (parse_uint32(optarg, opt->var)) return; message = "a 32bit unsigned integer"; break; case 'I': if (parse_int64(optarg, opt->var)) return; message = "a 64bit signed integer"; break; case 'U': if (parse_uint64(optarg, opt->var)) return; message = "a 64bit unsigned integer"; break; case 's': if (opt->source != SOURCE_DEFAULT) free(*(char **) opt->var); *(char **) opt->var = pgut_strdup(optarg); return; case 't': if (parse_time(optarg, opt->var)) return; message = "a time"; break; case 'y': case 'Y': if (optarg == NULL) { *(YesNo *) opt->var = (opt->type == 'y' ?
YES : NO); return; } else { bool value; if (parse_bool(optarg, &value)) { *(YesNo *) opt->var = (value ? YES : NO); return; } } message = "a boolean"; break; default: ereport(ERROR, (errcode(EINVAL), errmsg("invalid option type: %c", opt->type))); return; /* keep compiler quiet */ } } if (isprint(opt->sname)) ereport(ERROR, (errcode(EINVAL), errmsg("option -%c, --%s should be %s: '%s'", opt->sname, opt->lname, message, optarg))); else ereport(ERROR, (errcode(EINVAL), errmsg("option --%s should be %s: '%s'", opt->lname, message, optarg))); } /* * Get configuration from configuration file. */ void pgut_readopt(const char *path, pgut_option options[], int elevel) { FILE *fp; char buf[1024]; char key[1024]; char value[1024]; if (!options) return; if ((fp = pgut_fopen(path, "Rt")) == NULL) return; while (fgets(buf, lengthof(buf), fp)) { size_t i; for (i = strlen(buf); i > 0 && IsSpace(buf[i - 1]); i--) buf[i - 1] = '\0'; if (parse_pair(buf, key, value)) { for (i = 0; options[i].type; i++) { pgut_option *opt = &options[i]; if (pgut_keyeq(key, opt->lname)) { if (opt->allowed == SOURCE_DEFAULT || opt->allowed > SOURCE_FILE) elog(elevel, "option %s cannot be specified in file", opt->lname); else if (opt->source <= SOURCE_FILE) pgut_setopt(opt, value, SOURCE_FILE); break; } } if (!options[i].type) elog(elevel, "invalid option \"%s\"", key); } } fclose(fp); } static const char * skip_space(const char *str, const char *line) { while (IsSpace(*str)) { str++; } return str; } static const char * get_next_token(const char *src, char *dst, const char *line) { const char *s; size_t i; size_t j; if ((s = skip_space(src, line)) == NULL) return NULL; /* parse quoted string */ if (*s == '\'') { s++; for (i = 0, j = 0; s[i] != '\0'; i++) { if (s[i] == '\\') { i++; switch (s[i]) { case 'b': dst[j] = '\b'; break; case 'f': dst[j] = '\f'; break; case 'n': dst[j] = '\n'; break; case 'r': dst[j] = '\r'; break; case 't': dst[j] = '\t'; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': { int k; long octVal = 0; for (k = 0; s[i + k] >= '0' && s[i + k] <= '7' && k < 3; k++) octVal = (octVal << 3) + (s[i + k] - '0'); i += k - 1; dst[j] = ((char) octVal); } break; default: dst[j] = s[i]; break; } } else if (s[i] == '\'') { i++; /* doubled quote becomes just one quote */ if (s[i] == '\'') dst[j] = s[i]; else break; } else dst[j] = s[i]; j++; } } else { i = j = strcspn(s, "# \n\r\t\v"); memcpy(dst, s, j); } dst[j] = '\0'; return s + i; } static bool parse_pair(const char buffer[], char key[], char value[]) { const char *start; const char *end; key[0] = value[0] = '\0'; /* * parse key */ start = buffer; if ((start = skip_space(start, buffer)) == NULL) return false; end = start + strcspn(start, "=# \n\r\t\v"); /* skip blank buffer */ if (end - start <= 0) { if (*start == '=') elog(WARNING, "syntax error in \"%s\"", buffer); return false; } /* key found */ strncpy(key, start, end - start); key[end - start] = '\0'; /* find key and value split char */ if ((start = skip_space(end, buffer)) == NULL) return false; if (*start != '=') { elog(WARNING, "syntax error in \"%s\"", buffer); return false; } start++; /* * parse value */ if ((end = get_next_token(start, value, buffer)) == NULL) return false; if ((start = skip_space(end, buffer)) == NULL) return false; if (*start != '\0' && *start != '#') { elog(WARNING, "syntax error in \"%s\"", buffer); return false; } return true; } /* * execute - Execute an SQL query and return the result.
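*
* Usage sketch (editor's illustration; the OID value is made up):
*
*   const char *params[1];
*   char        buf[12];
*   PGresult   *res;
*
*   params[0] = utoa(16385, buf);
*   res = execute("SELECT relname FROM pg_class WHERE oid = $1",
*                 1, params);
*   ... read PQgetvalue(res, 0, 0) ...
*   CLEARPGRES(res);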
*/ PGresult * execute(const char *query, int nParams, const char **params) { return pgut_execute(connection, query, nParams, params); } PGresult * execute_elevel(const char *query, int nParams, const char **params, int elevel) { return pgut_execute_elevel(connection, query, nParams, params, elevel); } /* * command - Execute an SQL command and discard the result. */ ExecStatusType command(const char *query, int nParams, const char **params) { return pgut_command(connection, query, nParams, params); } static void set_elevel(pgut_option *opt, const char *arg) { pgut_log_level = parse_elevel(arg); } static pgut_option default_options[] = { { 'b', 'e', "echo" , &pgut_echo }, { 'f', 'E', "elevel" , set_elevel }, { 's', 'd', "dbname" , &dbname }, { 's', 'h', "host" , &host }, { 's', 'p', "port" , &port }, { 's', 'U', "username" , &username }, { 'Y', 'w', "no-password" , &prompt_password }, { 'y', 'W', "password" , &prompt_password }, { 0 } }; static size_t option_length(const pgut_option opts[]) { size_t len; for (len = 0; opts && opts[len].type; len++) { } return len; } static pgut_option * option_find(int c, pgut_option opts1[], pgut_option opts2[]) { size_t i; for (i = 0; opts1 && opts1[i].type; i++) if (opts1[i].sname == c) return &opts1[i]; for (i = 0; opts2 && opts2[i].type; i++) if (opts2[i].sname == c) return &opts2[i]; return NULL; /* not found */ } static int option_has_arg(char type) { switch (type) { case 'b': case 'B': case 'y': case 'Y': return no_argument; default: return required_argument; } } static void option_copy(struct option dst[], const pgut_option opts[], size_t len) { size_t i; for (i = 0; i < len; i++) { dst[i].name = opts[i].lname; dst[i].has_arg = option_has_arg(opts[i].type); dst[i].flag = NULL; dst[i].val = opts[i].sname; } } static struct option * option_merge(const pgut_option opts1[], const pgut_option opts2[]) { struct option *result; size_t len1 = option_length(opts1); size_t len2 = option_length(opts2); size_t n = len1 + len2; result = pgut_newarray(struct option, n + 1); option_copy(result, opts1, len1); option_copy(result + len1, opts2, len2); memset(&result[n], 0, sizeof(struct option)); /* zero the terminator entry */ return result; } static char * longopts_to_optstring(const struct option opts[]) { size_t len; char *result; char *s; for (len = 0; opts[len].name; len++) { } result = pgut_malloc(len * 2 + 1); s = result; for (len = 0; opts[len].name; len++) { if (!isprint(opts[len].val)) continue; *s++ = opts[len].val; if (opts[len].has_arg != no_argument) *s++ = ':'; } *s = '\0'; return result; } int pgut_getopt(int argc, char **argv, pgut_option options[]) { int c; int optindex = 0; char *optstring; struct option *longopts; pgut_option *opt; pgut_init(argc, argv); /* Help message and version are handled at first. */ if (argc > 1) { if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) { help(true); exit(0); } if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) { printf("%s %s\n", PROGRAM_NAME, PROGRAM_VERSION); exit(0); } if (strcmp(argv[1], "--configuration") == 0) { printf("%s\n", PG_VERSION_STR); exit(0); } } /* Merge default and user options.
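*
* For illustration (editor's sketch): a client declares its options
* with the type codes documented in pgut-fe.h, e.g.
*
*   static bool  analyze = true;
*   static char *tablespace = NULL;
*   static pgut_option options[] =
*   {
*       { 'B', 'Z', "no-analyze", &analyze },
*       { 's', 's', "tablespace", &tablespace },
*       { 0 }
*   };
*
* and the merge below lets getopt_long() see those together with the
* connection options (--dbname, --host, ...) defined in this file.
* Here 'B' stores false when the flag is given and 's' duplicates the
* argument string.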
*/ longopts = option_merge(default_options, options); optstring = longopts_to_optstring(longopts); /* Assign named options */ while ((c = getopt_long(argc, argv, optstring, longopts, &optindex)) != -1) { opt = option_find(c, default_options, options); pgut_setopt(opt, optarg, SOURCE_CMDLINE); } /* Read environment variables */ option_from_env(options); (void) (dbname || (dbname = getenv("PGDATABASE")) || (dbname = getenv("PGUSER")) || (dbname = get_user_name_or_exit(PROGRAM_NAME))); return optind; } void help(bool details) { pgut_help(details); if (details) { printf("\nConnection options:\n"); printf(" -d, --dbname=DBNAME database to connect\n"); printf(" -h, --host=HOSTNAME database server host or socket directory\n"); printf(" -p, --port=PORT database server port\n"); printf(" -U, --username=USERNAME user name to connect as\n"); printf(" -w, --no-password never prompt for password\n"); printf(" -W, --password force password prompt\n"); } printf("\nGeneric options:\n"); if (details) { printf(" -e, --echo echo queries\n"); printf(" -E, --elevel=LEVEL set output message level\n"); } printf(" --help show this help, then exit\n"); printf(" --version output version information, then exit\n"); if (details && (PROGRAM_URL || PROGRAM_ISSUES)) { printf("\n"); if (PROGRAM_URL) printf("Read the website for details: <%s>.\n", PROGRAM_URL); if (PROGRAM_ISSUES) printf("Report bugs to <%s>.\n", PROGRAM_ISSUES); } } pg_repack-ver_1.5.0/bin/pgut/pgut-fe.h000066400000000000000000000046161452746007700176160ustar00rootroot00000000000000/*------------------------------------------------------------------------- * pgut-fe.h * * Portions Copyright (c) 2008-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION * Portions Copyright (c) 2011, Itagaki Takahiro * Portions Copyright (c) 2012-2020, The Reorg Development Team *------------------------------------------------------------------------- */ #ifndef PGUT_FE_H #define PGUT_FE_H #include "pgut.h" typedef enum pgut_optsrc { SOURCE_DEFAULT, SOURCE_ENV, SOURCE_FILE, SOURCE_CMDLINE, SOURCE_CONST } pgut_optsrc; /* * type: * b: bool (true) * B: bool (false) * f: pgut_optfn * i: 32bit signed integer * l: StringList * u: 32bit unsigned integer * I: 64bit signed integer * U: 64bit unsigned integer * s: string * t: time_t * y: YesNo (YES) * Y: YesNo (NO) */ typedef struct pgut_option { char type; char sname; /* short name */ const char *lname; /* long name */ void *var; /* pointer to variable */ pgut_optsrc allowed; /* allowed source */ pgut_optsrc source; /* actual source */ } pgut_option; typedef void (*pgut_optfn) (pgut_option *opt, const char *arg); typedef struct worker_conns { int max_num_workers; int num_workers; PGconn **conns; } worker_conns; extern const char *dbname; extern char *host; extern char *port; extern char *username; extern char *password; extern YesNo prompt_password; extern PGconn *connection; extern PGconn *conn2; extern worker_conns workers; extern void pgut_help(bool details); extern void help(bool details); extern void disconnect(void); extern void reconnect(int elevel); extern void setup_workers(int num_workers); extern void disconnect_workers(void); extern PGresult *execute(const char *query, int nParams, const char **params); extern PGresult *execute_elevel(const char *query, int nParams, const char **params, int elevel); extern ExecStatusType command(const char *query, int nParams, const char **params); extern int pgut_getopt(int argc, char **argv, pgut_option options[]); extern void pgut_readopt(const char *path, pgut_option options[], int 
elevel); extern void pgut_setopt(pgut_option *opt, const char *optarg, pgut_optsrc src); extern bool pgut_keyeq(const char *lhs, const char *rhs); /* So we don't need to fret over multiple calls to PQclear(), e.g. * in cleanup labels. */ #define CLEARPGRES(pgres) do { PQclear(pgres); pgres = NULL; } while (0) #endif /* PGUT_FE_H */ pg_repack-ver_1.5.0/bin/pgut/pgut.c000066400000000000000000001001011452746007700172030ustar00rootroot00000000000000/*------------------------------------------------------------------------- * pgut.c * * Portions Copyright (c) 2008-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION * Portions Copyright (c) 2011, Itagaki Takahiro * Portions Copyright (c) 2012-2020, The Reorg Development Team *------------------------------------------------------------------------- */ #include "postgres_fe.h" #include "libpq/pqsignal.h" #if PG_VERSION_NUM >= 140000 #include "common/string.h" /* for simple_prompt */ #endif #include #include #include #include "pgut.h" #ifdef PGUT_MULTI_THREADED #include "pgut-pthread.h" static pthread_key_t pgut_edata_key; static pthread_mutex_t pgut_conn_mutex; #define pgut_conn_lock() pthread_mutex_lock(&pgut_conn_mutex) #define pgut_conn_unlock() pthread_mutex_unlock(&pgut_conn_mutex) #else #define pgut_conn_lock() ((void) 0) #define pgut_conn_unlock() ((void) 0) #endif /* old gcc doesn't have LLONG_MAX. */ #ifndef LLONG_MAX #if defined(HAVE_LONG_INT_64) || !defined(HAVE_LONG_LONG_INT_64) #define LLONG_MAX LONG_MAX #else #define LLONG_MAX INT64CONST(0x7FFFFFFFFFFFFFFF) #endif #endif const char *PROGRAM_NAME = NULL; /* Interrupted by SIGINT (Ctrl+C) ? */ bool interrupted = false; static bool in_cleanup = false; /* log min messages */ int pgut_log_level = INFO; int pgut_abort_level = ERROR; bool pgut_echo = false; /* Database connections */ typedef struct pgutConn pgutConn; struct pgutConn { PGconn *conn; PGcancel *cancel; pgutConn *next; }; static pgutConn *pgut_connections; /* Connection routines */ static void init_cancel_handler(void); static void on_before_exec(pgutConn *conn); static void on_after_exec(pgutConn *conn); static void on_interrupt(void); static void on_cleanup(void); static void exit_or_abort(int exitcode, int elevel); void pgut_init(int argc, char **argv) { if (PROGRAM_NAME == NULL) { PROGRAM_NAME = get_progname(argv[0]); set_pglocale_pgservice(argv[0], "pgscripts"); #ifdef PGUT_MULTI_THREADED pthread_key_create(&pgut_edata_key, NULL); pthread_mutex_init(&pgut_conn_mutex, NULL); #endif /* application_name for 9.0 or newer versions */ if (getenv("PGAPPNAME") == NULL) pgut_putenv("PGAPPNAME", PROGRAM_NAME); init_cancel_handler(); atexit(on_cleanup); } } void pgut_putenv(const char *key, const char *value) { char buf[1024]; snprintf(buf, lengthof(buf), "%s=%s", key, value); putenv(pgut_strdup(buf)); /* putenv requires malloc'ed buffer */ } /* * Try to interpret value as boolean value. Valid values are: true, * false, yes, no, on, off, 1, 0; as well as unique prefixes thereof. * If the string parses okay, return true, else false. * If okay and result is not NULL, return the value in *result. 
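*
* Worked examples (editor's illustration):
*
*   parse_bool("t", &b)   -- returns true,  b = true  (prefix of "true")
*   parse_bool("NO", &b)  -- returns true,  b = false (case-insensitive)
*   parse_bool("on", &b)  -- returns true,  b = true
*   parse_bool("o", &b)   -- returns false            ("o" is ambiguous)
*   parse_bool("2", &b)   -- returns false            (only "1" and "0")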
*/ bool parse_bool(const char *value, bool *result) { return parse_bool_with_len(value, strlen(value), result); } bool parse_bool_with_len(const char *value, size_t len, bool *result) { switch (*value) { case 't': case 'T': if (pg_strncasecmp(value, "true", len) == 0) { if (result) *result = true; return true; } break; case 'f': case 'F': if (pg_strncasecmp(value, "false", len) == 0) { if (result) *result = false; return true; } break; case 'y': case 'Y': if (pg_strncasecmp(value, "yes", len) == 0) { if (result) *result = true; return true; } break; case 'n': case 'N': if (pg_strncasecmp(value, "no", len) == 0) { if (result) *result = false; return true; } break; case 'o': case 'O': /* 'o' is not unique enough */ if (pg_strncasecmp(value, "on", (len > 2 ? len : 2)) == 0) { if (result) *result = true; return true; } else if (pg_strncasecmp(value, "off", (len > 2 ? len : 2)) == 0) { if (result) *result = false; return true; } break; case '1': if (len == 1) { if (result) *result = true; return true; } break; case '0': if (len == 1) { if (result) *result = false; return true; } break; default: break; } if (result) *result = false; /* suppress compiler warning */ return false; } /* * Parse string as 32bit signed int. * valid range: -2147483648 ~ 2147483647 */ bool parse_int32(const char *value, int32 *result) { int64 val; char *endptr; if (strcmp(value, INFINITE_STR) == 0) { *result = INT_MAX; return true; } errno = 0; val = strtol(value, &endptr, 0); if (endptr == value || *endptr) return false; if (errno == ERANGE || val != (int64) ((int32) val)) return false; *result = (int32) val; return true; } /* * Parse string as 32bit unsigned int. * valid range: 0 ~ 4294967295 (2^32-1) */ bool parse_uint32(const char *value, uint32 *result) { uint64 val; char *endptr; if (strcmp(value, INFINITE_STR) == 0) { *result = UINT_MAX; return true; } errno = 0; val = strtoul(value, &endptr, 0); if (endptr == value || *endptr) return false; if (errno == ERANGE || val != (uint64) ((uint32) val)) return false; *result = (uint32) val; return true; } /* * Parse string as int64 * valid range: -9223372036854775808 ~ 9223372036854775807 */ bool parse_int64(const char *value, int64 *result) { int64 val; char *endptr; if (strcmp(value, INFINITE_STR) == 0) { *result = LLONG_MAX; return true; } errno = 0; #ifdef WIN32 val = _strtoi64(value, &endptr, 0); #elif defined(HAVE_LONG_INT_64) val = strtol(value, &endptr, 0); #elif defined(HAVE_LONG_LONG_INT_64) val = strtoll(value, &endptr, 0); #else val = strtol(value, &endptr, 0); #endif if (endptr == value || *endptr) return false; if (errno == ERANGE) return false; *result = val; return true; } /* * Parse string as uint64 * valid range: 0 ~ (2^64-1) */ bool parse_uint64(const char *value, uint64 *result) { uint64 val; char *endptr; if (strcmp(value, INFINITE_STR) == 0) { #if defined(HAVE_LONG_INT_64) *result = ULONG_MAX; #elif defined(HAVE_LONG_LONG_INT_64) *result = ULLONG_MAX; #else *result = ULONG_MAX; #endif return true; } errno = 0; #ifdef WIN32 val = _strtoui64(value, &endptr, 0); #elif defined(HAVE_LONG_INT_64) val = strtoul(value, &endptr, 0); #elif defined(HAVE_LONG_LONG_INT_64) val = strtoull(value, &endptr, 0); #else val = strtoul(value, &endptr, 0); #endif if (endptr == value || *endptr) return false; if (errno == ERANGE) return false; *result = val; return true; } /* * Convert ISO-8601 format string to time_t value. 
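*
* Worked example (editor's illustration):
*
*   time_t t;
*   parse_time("2023-11-05 12:30:45", &t);
*
* The string is scanned as "2023 11 05 12 30 45", filled into a struct
* tm, and mktime() converts it to a local-time epoch value, deciding
* DST itself. A date-only string such as "2023-11-05" also parses.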
*/ bool parse_time(const char *value, time_t *time) { size_t len; char *tmp; int i; struct tm tm; char junk[2]; /* tmp = replace( value, !isalnum, ' ' ) */ tmp = pgut_malloc(strlen(value) + 1); len = 0; for (i = 0; value[i]; i++) tmp[len++] = (IsAlnum(value[i]) ? value[i] : ' '); tmp[len] = '\0'; /* parse for "YYYY-MM-DD HH:MI:SS" */ memset(&tm, 0, sizeof(tm)); tm.tm_year = 0; /* tm_year is year - 1900 */ tm.tm_mon = 0; /* tm_mon is 0 - 11 */ tm.tm_mday = 1; /* tm_mday is 1 - 31 */ tm.tm_hour = 0; tm.tm_min = 0; tm.tm_sec = 0; i = sscanf(tmp, "%04d %02d %02d %02d %02d %02d%1s", &tm.tm_year, &tm.tm_mon, &tm.tm_mday, &tm.tm_hour, &tm.tm_min, &tm.tm_sec, junk); free(tmp); if (i < 1 || 6 < i) return false; /* adjust year */ if (tm.tm_year < 100) tm.tm_year += 2000 - 1900; else if (tm.tm_year >= 1900) tm.tm_year -= 1900; /* adjust month */ if (i > 1) tm.tm_mon -= 1; /* determine whether Daylight Saving Time is in effect */ tm.tm_isdst = -1; *time = mktime(&tm); return true; } /* Append the given string `val` to the `list` */ void simple_string_list_append(SimpleStringList *list, const char *val) { SimpleStringListCell *cell; /* this calculation correctly accounts for the null trailing byte */ cell = (SimpleStringListCell *) pgut_malloc(sizeof(SimpleStringListCell) + strlen(val)); cell->next = NULL; strcpy(cell->val, val); if (list->tail) list->tail->next = cell; else list->head = cell; list->tail = cell; } /* Test whether `val` is in the given `list` */ bool simple_string_list_member(SimpleStringList *list, const char *val) { SimpleStringListCell *cell; for (cell = list->head; cell; cell = cell->next) { if (strcmp(cell->val, val) == 0) return true; } return false; } /* Returns the number of elements in the given SimpleStringList */ size_t simple_string_list_size(SimpleStringList list) { size_t i = 0; SimpleStringListCell *cell = list.head; while (cell) { cell = cell->next; i++; } return i; } static char * prompt_for_password(void) { char *buf; static char *passwdbuf; static bool have_passwd = false; #if PG_VERSION_NUM >= 140000 static size_t passwd_size = 0; #endif #define BUFSIZE 1024 #if PG_VERSION_NUM < 100000 if (have_passwd) { buf = pgut_malloc(BUFSIZE); memcpy(buf, passwdbuf, sizeof(char)*BUFSIZE); } else { buf = simple_prompt("Password: ", BUFSIZE, false); have_passwd = true; passwdbuf = pgut_malloc(BUFSIZE); memcpy(passwdbuf, buf, sizeof(char)*BUFSIZE); } #elif PG_VERSION_NUM < 140000 buf = pgut_malloc(BUFSIZE); if (have_passwd) { memcpy(buf, passwdbuf, sizeof(char)*BUFSIZE); } else { if (buf != NULL) simple_prompt("Password: ", buf, BUFSIZE, false); have_passwd = true; passwdbuf = pgut_malloc(BUFSIZE); memcpy(passwdbuf, buf, sizeof(char)*BUFSIZE); } #else if (have_passwd) { buf = pgut_malloc(passwd_size); memcpy(buf, passwdbuf, sizeof(char) * passwd_size); } else { buf = simple_prompt("Password: ", false); passwd_size = strlen(buf) + 1; have_passwd = true; passwdbuf = pgut_malloc(passwd_size); memcpy(passwdbuf, buf, sizeof(char) * passwd_size); } #endif if (buf == NULL) ereport(FATAL, (errcode_errno(), errmsg("could not allocate memory (" UINT64_FORMAT " bytes): ", (uint64) BUFSIZE))); return buf; #undef BUFSIZE } PGconn * pgut_connect(const char *info, YesNo prompt, int elevel) { char *passwd; StringInfoData add_pass; if (prompt == YES) { passwd = prompt_for_password(); initStringInfo(&add_pass); appendStringInfoString(&add_pass, info); appendStringInfo(&add_pass, " password=%s ", passwd); } else { passwd = NULL; add_pass.data = NULL; } /* Start the connection. 
Loop until we have a password if requested by backend. */ for (;;) { PGconn *conn; CHECK_FOR_INTERRUPTS(); if (!passwd) conn = PQconnectdb(info); else conn = PQconnectdb(add_pass.data); if (PQstatus(conn) == CONNECTION_OK) { pgutConn *c; c = pgut_new(pgutConn); c->conn = conn; c->cancel = NULL; pgut_conn_lock(); c->next = pgut_connections; pgut_connections = c; pgut_conn_unlock(); if (add_pass.data != NULL) termStringInfo(&add_pass); free(passwd); /* Hardcode a search path to avoid injections into public or pg_temp */ pgut_command(conn, "SET search_path TO pg_catalog, pg_temp, public", 0, NULL); return conn; } if (conn && PQconnectionNeedsPassword(conn) && !passwd && prompt != NO) { PQfinish(conn); passwd = prompt_for_password(); if (add_pass.data != NULL) resetStringInfo(&add_pass); else initStringInfo(&add_pass); appendStringInfoString(&add_pass, info); appendStringInfo(&add_pass, " password=%s ", passwd); continue; } if (add_pass.data != NULL) termStringInfo(&add_pass); free(passwd); ereport(elevel, (errcode(E_PG_CONNECT), errmsg("could not connect to database: %s", PQerrorMessage(conn)))); PQfinish(conn); return NULL; } } void pgut_disconnect(PGconn *conn) { if (conn) { pgutConn *c; pgutConn **prev; pgut_conn_lock(); prev = &pgut_connections; for (c = pgut_connections; c; c = c->next) { if (c->conn == conn) { *prev = c->next; break; } prev = &c->next; } pgut_conn_unlock(); PQfinish(conn); } } void pgut_disconnect_all(void) { pgut_conn_lock(); while (pgut_connections) { PQfinish(pgut_connections->conn); pgut_connections = pgut_connections->next; } pgut_conn_unlock(); } static void echo_query(const char *query, int nParams, const char **params) { int i; if (strchr(query, '\n')) elog(LOG, "(query)\n%s", query); else elog(LOG, "(query) %s", query); for (i = 0; i < nParams; i++) elog(LOG, "\t(param:%d) = %s", i, params[i] ? 
params[i] : "(null)"); } PGresult * pgut_execute(PGconn* conn, const char *query, int nParams, const char **params) { return pgut_execute_elevel(conn, query, nParams, params, ERROR); } PGresult * pgut_execute_elevel(PGconn* conn, const char *query, int nParams, const char **params, int elevel) { PGresult *res; pgutConn *c; CHECK_FOR_INTERRUPTS(); /* write query to elog if debug */ if (pgut_echo) echo_query(query, nParams, params); if (conn == NULL) { ereport(elevel, (errcode(E_PG_COMMAND), errmsg("not connected"))); return NULL; } /* find connection */ pgut_conn_lock(); for (c = pgut_connections; c; c = c->next) if (c->conn == conn) break; pgut_conn_unlock(); if (c) on_before_exec(c); if (nParams == 0) res = PQexec(conn, query); else res = PQexecParams(conn, query, nParams, NULL, params, NULL, NULL, 0); if (c) on_after_exec(c); switch (PQresultStatus(res)) { case PGRES_TUPLES_OK: case PGRES_COMMAND_OK: case PGRES_COPY_IN: break; default: ereport(elevel, (errcode(E_PG_COMMAND), errmsg("query failed: %s", PQerrorMessage(conn)), errdetail("query was: %s", query))); break; } return res; } ExecStatusType pgut_command(PGconn* conn, const char *query, int nParams, const char **params) { PGresult *res; ExecStatusType code; res = pgut_execute(conn, query, nParams, params); code = PQresultStatus(res); PQclear(res); return code; } /* commit if needed */ bool pgut_commit(PGconn *conn) { if (conn && PQtransactionStatus(conn) != PQTRANS_IDLE) return pgut_command(conn, "COMMIT", 0, NULL) == PGRES_COMMAND_OK; return true; /* nothing to do */ } /* rollback if needed */ void pgut_rollback(PGconn *conn) { if (conn && PQtransactionStatus(conn) != PQTRANS_IDLE) pgut_command(conn, "ROLLBACK", 0, NULL); } bool pgut_send(PGconn* conn, const char *query, int nParams, const char **params) { int res; CHECK_FOR_INTERRUPTS(); /* write query to elog if debug */ if (pgut_echo) echo_query(query, nParams, params); if (conn == NULL) { ereport(ERROR, (errcode(E_PG_COMMAND), errmsg("not connected"))); return false; } if (nParams == 0) res = PQsendQuery(conn, query); else res = PQsendQueryParams(conn, query, nParams, NULL, params, NULL, NULL, 0); if (res != 1) { ereport(ERROR, (errcode(E_PG_COMMAND), errmsg("query failed: %s", PQerrorMessage(conn)), errdetail("query was: %s", query))); return false; } return true; } int pgut_wait(int num, PGconn *connections[], struct timeval *timeout) { /* all connections are busy. wait for finish */ while (!interrupted) { int i; fd_set mask; int maxsock; FD_ZERO(&mask); maxsock = -1; for (i = 0; i < num; i++) { int sock; if (connections[i] == NULL) continue; sock = PQsocket(connections[i]); if (sock >= 0) { FD_SET(sock, &mask); if (maxsock < sock) maxsock = sock; } } if (maxsock == -1) { errno = ENOENT; return -1; } i = wait_for_sockets(maxsock + 1, &mask, timeout); if (i == 0) break; /* timeout */ for (i = 0; i < num; i++) { if (connections[i] && FD_ISSET(PQsocket(connections[i]), &mask)) { PQconsumeInput(connections[i]); if (PQisBusy(connections[i])) continue; return i; } } } errno = EINTR; return -1; } /* * CHECK_FOR_INTERRUPTS - Ctrl+C pressed? 
*/ void CHECK_FOR_INTERRUPTS(void) { if (interrupted && !in_cleanup) ereport(FATAL, (errcode(EINTR), errmsg("interrupted"))); } /* * elog stuff */ typedef struct pgutErrorData { int elevel; int save_errno; int code; StringInfoData msg; StringInfoData detail; } pgutErrorData; /* FIXME: support recursive error */ static pgutErrorData * getErrorData(void) { #ifdef PGUT_MULTI_THREADED pgutErrorData *edata = pthread_getspecific(pgut_edata_key); if (edata == NULL) { edata = pgut_new(pgutErrorData); memset(edata, 0, sizeof(pgutErrorData)); pthread_setspecific(pgut_edata_key, edata); } return edata; #else static pgutErrorData edata; return &edata; #endif } static pgutErrorData * pgut_errinit(int elevel) { int save_errno = errno; pgutErrorData *edata = getErrorData(); edata->elevel = elevel; edata->save_errno = save_errno; edata->code = (elevel >= ERROR ? 1 : 0); /* reset msg */ if (edata->msg.data) resetStringInfo(&edata->msg); else initStringInfo(&edata->msg); /* reset detail */ if (edata->detail.data) resetStringInfo(&edata->detail); else initStringInfo(&edata->detail); return edata; } /* remove white spaces and line breaks from the end of buffer */ static void trimStringBuffer(StringInfo str) { while (str->len > 0 && IsSpace(str->data[str->len - 1])) str->data[--str->len] = '\0'; } void elog(int elevel, const char *fmt, ...) { va_list args; bool ok; size_t len; pgutErrorData *edata; if (elevel < pgut_abort_level && !log_required(elevel, pgut_log_level)) return; edata = pgut_errinit(elevel); do { va_start(args, fmt); ok = pgut_appendStringInfoVA(&edata->msg, fmt, args); va_end(args); } while (!ok); len = strlen(fmt); if (len > 2 && strcmp(fmt + len - 2, ": ") == 0) appendStringInfoString(&edata->msg, strerror(edata->save_errno)); trimStringBuffer(&edata->msg); pgut_errfinish(true); } bool pgut_errstart(int elevel) { if (elevel < pgut_abort_level && !log_required(elevel, pgut_log_level)) return false; pgut_errinit(elevel); return true; } void pgut_errfinish(int dummy, ...) { pgutErrorData *edata = getErrorData(); if (log_required(edata->elevel, pgut_log_level)) pgut_error(edata->elevel, edata->code, edata->msg.data ? edata->msg.data : "unknown", edata->detail.data); if (pgut_abort_level <= edata->elevel && edata->elevel <= PANIC) { in_cleanup = true; /* need to be set for cleaning temporary objects on error */ exit_or_abort(edata->code, edata->elevel); } } #ifndef PGUT_OVERRIDE_ELOG void pgut_error(int elevel, int code, const char *msg, const char *detail) { const char *tag = format_elevel(elevel); if (detail && detail[0]) fprintf(stderr, "%s: %s\nDETAIL: %s\n", tag, msg, detail); else fprintf(stderr, "%s: %s\n", tag, msg); fflush(stderr); } #endif /* * log_required -- is elevel logically >= log_min_level?
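*
* Worked examples (editor's illustration), using the two orderings
* listed below:
*
*   log_required(LOG, WARNING)  -- true   (LOG shown unless min level
*                                          is FATAL or PANIC)
*   log_required(INFO, LOG)     -- false  (min level LOG hides all but
*                                          FATAL and PANIC)
*   log_required(FATAL, LOG)    -- true
*   log_required(NOTICE, INFO)  -- true   (ordinary comparison)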
* * physical order: * DEBUG < LOG < INFO < NOTICE < WARNING < ERROR < FATAL < PANIC * log_min_messages order: * DEBUG < INFO < NOTICE < WARNING < ERROR < LOG < FATAL < PANIC */ bool log_required(int elevel, int log_min_level) { if (elevel == LOG || elevel == COMMERROR) { if (log_min_level == LOG || log_min_level <= ERROR) return true; } else if (log_min_level == LOG) { /* elevel != LOG */ if (elevel >= FATAL) return true; } /* Neither is LOG */ else if (elevel >= log_min_level) return true; return false; } const char * format_elevel(int elevel) { switch (elevel) { case DEBUG5: case DEBUG4: case DEBUG3: case DEBUG2: case DEBUG1: return "DEBUG"; case LOG: return "LOG"; case INFO: return "INFO"; case NOTICE: return "NOTICE"; case WARNING: return "WARNING"; case COMMERROR: case ERROR: return "ERROR"; case FATAL: return "FATAL"; case PANIC: return "PANIC"; default: ereport(ERROR, (errcode(EINVAL), errmsg("invalid elevel: %d", elevel))); return ""; /* unknown value; just return an empty string */ } } int parse_elevel(const char *value) { if (pg_strcasecmp(value, "DEBUG") == 0) return DEBUG2; else if (pg_strcasecmp(value, "INFO") == 0) return INFO; else if (pg_strcasecmp(value, "NOTICE") == 0) return NOTICE; else if (pg_strcasecmp(value, "LOG") == 0) return LOG; else if (pg_strcasecmp(value, "WARNING") == 0) return WARNING; else if (pg_strcasecmp(value, "ERROR") == 0) return ERROR; else if (pg_strcasecmp(value, "FATAL") == 0) return FATAL; else if (pg_strcasecmp(value, "PANIC") == 0) return PANIC; ereport(ERROR, (errcode(EINVAL), errmsg("invalid elevel: %s", value))); return ERROR; /* unknown value; just return ERROR */ } int errcode(int sqlerrcode) { pgutErrorData *edata = getErrorData(); edata->code = sqlerrcode; return 0; } int errcode_errno(void) { pgutErrorData *edata = getErrorData(); edata->code = edata->save_errno; return 0; } int errmsg(const char *fmt,...) { pgutErrorData *edata = getErrorData(); va_list args; size_t len; bool ok; do { va_start(args, fmt); ok = pgut_appendStringInfoVA(&edata->msg, fmt, args); va_end(args); } while (!ok); len = strlen(fmt); if (len > 2 && strcmp(fmt + len - 2, ": ") == 0) appendStringInfoString(&edata->msg, strerror(edata->save_errno)); trimStringBuffer(&edata->msg); return 0; /* return value does not matter */ } int errdetail(const char *fmt,...) { pgutErrorData *edata = getErrorData(); va_list args; bool ok; do { va_start(args, fmt); ok = pgut_appendStringInfoVA(&edata->detail, fmt, args); va_end(args); } while (!ok); trimStringBuffer(&edata->detail); return 0; /* return value does not matter */ } #ifdef WIN32 static CRITICAL_SECTION cancelConnLock; #endif /* * on_before_exec * * Set cancel to point to the current database connection. */ static void on_before_exec(pgutConn *conn) { PGcancel *old; if (in_cleanup) return; /* forbid cancel during cleanup */ #ifdef WIN32 EnterCriticalSection(&cancelConnLock); #endif /* Free the old one if we have one */ old = conn->cancel; /* be sure handle_sigint doesn't use pointer while freeing */ conn->cancel = NULL; if (old != NULL) PQfreeCancel(old); conn->cancel = PQgetCancel(conn->conn); #ifdef WIN32 LeaveCriticalSection(&cancelConnLock); #endif } /* * on_after_exec * * Free the current cancel connection, if any, and set to NULL. 
*/ static void on_after_exec(pgutConn *conn) { PGcancel *old; if (in_cleanup) return; /* forbid cancel during cleanup */ #ifdef WIN32 EnterCriticalSection(&cancelConnLock); #endif old = conn->cancel; /* be sure handle_sigint doesn't use pointer while freeing */ conn->cancel = NULL; if (old != NULL) PQfreeCancel(old); #ifdef WIN32 LeaveCriticalSection(&cancelConnLock); #endif } /* * Handle interrupt signals by cancelling the current command. */ static void on_interrupt(void) { pgutConn *c; int save_errno = errno; /* Set interrupted flag */ interrupted = true; if (in_cleanup) return; /* Send QueryCancel if we are processing a database query */ pgut_conn_lock(); for (c = pgut_connections; c; c = c->next) { char buf[256]; if (c->cancel != NULL && PQcancel(c->cancel, buf, sizeof(buf))) elog(WARNING, "Cancel request sent"); } pgut_conn_unlock(); errno = save_errno; /* just in case the write changed it */ } typedef struct pgut_atexit_item pgut_atexit_item; struct pgut_atexit_item { pgut_atexit_callback callback; void *userdata; pgut_atexit_item *next; }; static pgut_atexit_item *pgut_atexit_stack = NULL; void pgut_atexit_push(pgut_atexit_callback callback, void *userdata) { pgut_atexit_item *item; AssertArg(callback != NULL); item = pgut_new(pgut_atexit_item); item->callback = callback; item->userdata = userdata; item->next = pgut_atexit_stack; pgut_atexit_stack = item; } void pgut_atexit_pop(pgut_atexit_callback callback, void *userdata) { pgut_atexit_item *item; pgut_atexit_item **prev; for (item = pgut_atexit_stack, prev = &pgut_atexit_stack; item; prev = &item->next, item = item->next) { if (item->callback == callback && item->userdata == userdata) { *prev = item->next; free(item); break; } } } static void call_atexit_callbacks(bool fatal) { pgut_atexit_item *item; for (item = pgut_atexit_stack; item; item = item->next) { item->callback(fatal, item->userdata); } } static void on_cleanup(void) { in_cleanup = true; interrupted = false; call_atexit_callbacks(false); pgut_disconnect_all(); } static void exit_or_abort(int exitcode, int elevel) { if (in_cleanup && FATAL > elevel) { /* oops, error in cleanup */ call_atexit_callbacks(true); exit(exitcode); } else if (elevel >= FATAL && elevel <= PANIC) { /* on FATAL or PANIC */ call_atexit_callbacks(true); abort(); } else { /* normal exit */ exit(exitcode); } } /* * unlike the server code, this function automatically extends the buffer. */ bool pgut_appendStringInfoVA(StringInfo str, const char *fmt, va_list args) { size_t avail; int nprinted; Assert(str != NULL); Assert(str->maxlen > 0); avail = str->maxlen - str->len - 1; nprinted = vsnprintf(str->data + str->len, avail, fmt, args); if (nprinted >= 0 && nprinted < (int) avail - 1) { str->len += nprinted; return true; } /* Double the buffer size and try again.
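*
* Callers therefore retry in a loop, as elog() and errmsg() in this
* file do (shown here as an editor's illustration):
*
*   do {
*       va_start(args, fmt);
*       ok = pgut_appendStringInfoVA(&edata->msg, fmt, args);
*       va_end(args);
*   } while (!ok);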
*/ enlargePQExpBuffer(str, str->maxlen); return false; } int appendStringInfoFile(StringInfo str, FILE *fp) { AssertArg(str != NULL); AssertArg(fp != NULL); for (;;) { int rc; if (str->maxlen - str->len < 2 && enlargeStringInfo(str, 1024) == 0) return errno = ENOMEM; rc = fread(str->data + str->len, 1, str->maxlen - str->len - 1, fp); if (rc == 0) break; else if (rc > 0) { str->len += rc; str->data[str->len] = '\0'; } else if (ferror(fp) && errno != EINTR) return errno; } return 0; } int appendStringInfoFd(StringInfo str, int fd) { AssertArg(str != NULL); AssertArg(fd != -1); for (;;) { int rc; if (str->maxlen - str->len < 2 && enlargeStringInfo(str, 1024) == 0) return errno = ENOMEM; rc = read(fd, str->data + str->len, str->maxlen - str->len - 1); if (rc == 0) break; else if (rc > 0) { str->len += rc; str->data[str->len] = '\0'; } else if (errno != EINTR) return errno; } return 0; } void * pgut_malloc(size_t size) { char *ret; if ((ret = malloc(size)) == NULL) ereport(FATAL, (errcode_errno(), errmsg("could not allocate memory (" UINT64_FORMAT " bytes): ", (uint64) size))); return ret; } void * pgut_realloc(void *p, size_t size) { char *ret; if ((ret = realloc(p, size)) == NULL) ereport(FATAL, (errcode_errno(), errmsg("could not re-allocate memory (" UINT64_FORMAT " bytes): ", (uint64) size))); return ret; } char * pgut_strdup(const char *str) { char *ret; if (str == NULL) return NULL; if ((ret = strdup(str)) == NULL) ereport(FATAL, (errcode_errno(), errmsg("could not duplicate string \"%s\": ", str))); return ret; } char * strdup_with_len(const char *str, size_t len) { char *r; if (str == NULL) return NULL; r = pgut_malloc(len + 1); memcpy(r, str, len); r[len] = '\0'; return r; } /* strdup but trim whitespaces at head and tail */ char * strdup_trim(const char *str) { size_t len; if (str == NULL) return NULL; while (IsSpace(str[0])) { str++; } len = strlen(str); while (len > 0 && IsSpace(str[len - 1])) { len--; } return strdup_with_len(str, len); } /* * Try to open a file. Also create parent directories if opening for writes. * * mode can contain 'R', which is the same as 'r' except that a missing file is ok. */ FILE * pgut_fopen(const char *path, const char *omode) { FILE *fp; bool missing_ok = false; char mode[16]; strlcpy(mode, omode, lengthof(mode)); if (mode[0] == 'R') { mode[0] = 'r'; missing_ok = true; } retry: if ((fp = fopen(path, mode)) == NULL) { if (errno == ENOENT) { if (missing_ok) return NULL; if (mode[0] == 'w' || mode[0] == 'a') { char dir[MAXPGPATH]; strlcpy(dir, path, MAXPGPATH); get_parent_directory(dir); pgut_mkdir(dir); goto retry; } } ereport(ERROR, (errcode_errno(), errmsg("could not open file \"%s\": ", path))); } return fp; } /* * this tries to build all the elements of a path to a directory a la mkdir -p * we assume the path is in canonical form, i.e. uses / as the separator. */ bool pgut_mkdir(const char *dirpath) { struct stat sb; int first, last, retval; char *path; char *p; Assert(dirpath != NULL); p = path = pgut_strdup(dirpath); retval = 0; #ifdef WIN32 /* skip network and drive specifiers for win32 */ if (strlen(p) >= 2) { if (p[0] == '/' && p[1] == '/') { /* network drive */ p = strstr(p + 2, "/"); if (p == NULL) { free(path); ereport(ERROR, (errcode(EINVAL), errmsg("invalid path \"%s\"", dirpath))); return false; } } else if (p[1] == ':' && ((p[0] >= 'a' && p[0] <= 'z') || (p[0] >= 'A' && p[0] <= 'Z'))) { /* local drive */ p += 2; } } #endif if (p[0] == '/') /* Skip leading '/'.
/*
 * this tries to build all the elements of a path to a directory a la mkdir -p
 * we assume the path is in canonical form, i.e. uses / as the separator.
 */
bool
pgut_mkdir(const char *dirpath)
{
    struct stat sb;
    int         first,
                last,
                retval;
    char       *path;
    char       *p;

    Assert(dirpath != NULL);

    p = path = pgut_strdup(dirpath);
    retval = 0;

#ifdef WIN32
    /* skip network and drive specifiers for win32 */
    if (strlen(p) >= 2)
    {
        if (p[0] == '/' && p[1] == '/')
        {
            /* network drive */
            p = strstr(p + 2, "/");
            if (p == NULL)
            {
                free(path);
                ereport(ERROR,
                    (errcode(EINVAL),
                     errmsg("invalid path \"%s\"", dirpath)));
                return false;
            }
        }
        else if (p[1] == ':' &&
                ((p[0] >= 'a' && p[0] <= 'z') ||
                 (p[0] >= 'A' && p[0] <= 'Z')))
        {
            /* local drive */
            p += 2;
        }
    }
#endif

    if (p[0] == '/')    /* Skip leading '/'. */
        ++p;

    for (first = 1, last = 0; !last; ++p)
    {
        if (p[0] == '\0')
            last = 1;
        else if (p[0] != '/')
            continue;
        *p = '\0';
        if (!last && p[1] == '\0')
            last = 1;
        if (first)
            first = 0;

retry:
        /* check for pre-existing directory; ok if it's a parent */
        if (stat(path, &sb) == 0)
        {
            if (!S_ISDIR(sb.st_mode))
            {
                if (last)
                    errno = EEXIST;
                else
                    errno = ENOTDIR;
                retval = 1;
                break;
            }
        }
        else if (mkdir(path, S_IRWXU) < 0)
        {
            if (errno == EEXIST)
                goto retry; /* another thread might create the directory. */
            retval = 1;
            break;
        }
        if (!last)
            *p = '/';
    }

    free(path);

    if (retval != 0)
    {
        ereport(ERROR,
            (errcode_errno(),
             errmsg("could not create directory \"%s\": ", dirpath)));
        return false;
    }

    return true;
}

#ifdef WIN32
static int select_win32(int nfds, fd_set *readfds, fd_set *writefds,
                        fd_set *exceptfds, const struct timeval * timeout);
#define select select_win32
#endif

int
wait_for_socket(int sock, struct timeval *timeout)
{
    fd_set  fds;

    FD_ZERO(&fds);
    FD_SET(sock, &fds);
    return wait_for_sockets(sock + 1, &fds, timeout);
}

int
wait_for_sockets(int nfds, fd_set *fds, struct timeval *timeout)
{
    int     i;

    for (;;)
    {
        i = select(nfds, fds, NULL, NULL, timeout);
        if (i < 0)
        {
            CHECK_FOR_INTERRUPTS();
            if (errno != EINTR)
            {
                ereport(ERROR,
                    (errcode_errno(),
                     errmsg("select failed: ")));
                return -1;
            }
        }
        else
            return i;
    }
}

#ifndef WIN32
static void
handle_sigint(SIGNAL_ARGS)
{
    on_interrupt();
}

static void
init_cancel_handler(void)
{
    pqsignal(SIGINT, handle_sigint);
}
#else                           /* WIN32 */

/*
 * Console control handler for Win32. Note that the control handler will
 * execute on a *different thread* than the main one, so we need to do
 * proper locking around those structures.
 */
static BOOL WINAPI
consoleHandler(DWORD dwCtrlType)
{
    if (dwCtrlType == CTRL_C_EVENT ||
        dwCtrlType == CTRL_BREAK_EVENT)
    {
        EnterCriticalSection(&cancelConnLock);
        on_interrupt();
        LeaveCriticalSection(&cancelConnLock);
        return TRUE;
    }
    else
        /* Return FALSE for any signals not being handled */
        return FALSE;
}

static void
init_cancel_handler(void)
{
    InitializeCriticalSection(&cancelConnLock);
    SetConsoleCtrlHandler(consoleHandler, TRUE);
}

int
sleep(unsigned int seconds)
{
    Sleep(seconds * 1000);
    return 0;
}

int
usleep(unsigned int usec)
{
    Sleep((usec + 999) / 1000); /* rounded up */
    return 0;
}
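/*
 * The select() wrapper below works around the fact that on Windows the
 * console control handler runs on a separate thread, so a blocking
 * select() is never interrupted by Ctrl+C the way a Unix signal would
 * interrupt it.  Waking up once per second lets the loop notice the
 * "interrupted" flag set by consoleHandler() and fail with EINTR.
 */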
#undef select
static int
select_win32(int nfds, fd_set *readfds, fd_set *writefds,
             fd_set *exceptfds, const struct timeval * timeout)
{
    struct timeval  remain;

    if (timeout != NULL)
        remain = *timeout;
    else
    {
        remain.tv_usec = 0;
        remain.tv_sec = LONG_MAX;   /* infinite */
    }

    /* sleep only one second because Ctrl+C doesn't interrupt select. */
    while (remain.tv_sec > 0 || remain.tv_usec > 0)
    {
        int             ret;
        struct timeval  onesec;
        fd_set          save_readfds;
        fd_set          save_writefds;
        fd_set          save_exceptfds;

        if (remain.tv_sec > 0)
        {
            onesec.tv_sec = 1;
            onesec.tv_usec = 0;
            remain.tv_sec -= 1;
        }
        else
        {
            onesec.tv_sec = 0;
            onesec.tv_usec = remain.tv_usec;
            remain.tv_usec = 0;
        }

        /* save fds */
        if (readfds)
            memcpy(&save_readfds, readfds, sizeof(fd_set));
        if (writefds)
            memcpy(&save_writefds, writefds, sizeof(fd_set));
        if (exceptfds)
            memcpy(&save_exceptfds, exceptfds, sizeof(fd_set));

        ret = select(nfds, readfds, writefds, exceptfds, &onesec);
        if (ret > 0)
            return ret; /* succeeded */
        else if (ret < 0)
        {
            /* error */
            _dosmaperr(WSAGetLastError());
            return ret;
        }
        else if (interrupted)
        {
            errno = EINTR;
            return -1;
        }

        /* restore fds */
        if (readfds)
            memcpy(readfds, &save_readfds, sizeof(fd_set));
        if (writefds)
            memcpy(writefds, &save_writefds, sizeof(fd_set));
        if (exceptfds)
            memcpy(exceptfds, &save_exceptfds, sizeof(fd_set));
    }

    return 0;   /* timeout */
}
#endif   /* WIN32 */
pg_repack-ver_1.5.0/bin/pgut/pgut.h000066400000000000000000000143311452746007700172210ustar00rootroot00000000000000/*-------------------------------------------------------------------------
 * pgut.h
 *
 * Portions Copyright (c) 2008-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
 * Portions Copyright (c) 2011, Itagaki Takahiro
 * Portions Copyright (c) 2012-2020, The Reorg Development Team
 *-------------------------------------------------------------------------
 */
#ifndef PGUT_H
#define PGUT_H

#include "c.h"
/*
 * The system header names below were stripped by extraction; they are
 * reconstructed from what this header uses (assert for the Assert macros,
 * struct timeval and read() for the socket helpers).
 */
#include <assert.h>

#ifndef WIN32
#include <sys/time.h>
#include <unistd.h>
#endif

#include "libpq-fe.h"
#include "pqexpbuffer.h"
#include "utils/elog.h"

#define INFINITE_STR        "INFINITE"

#ifdef _MSC_VER
#define __attribute__(x)
#endif

typedef enum YesNo
{
    DEFAULT,
    NO,
    YES
} YesNo;

typedef void (*pgut_atexit_callback)(bool fatal, void *userdata);

/*
 * pgut client variables and functions
 */
extern const char  *PROGRAM_NAME;
extern const char  *PROGRAM_VERSION;
extern const char  *PROGRAM_URL;
extern const char  *PROGRAM_ISSUES;

/*
 * pgut framework variables and functions
 */
extern bool     interrupted;
extern int      pgut_log_level;
extern int      pgut_abort_level;
extern bool     pgut_echo;

extern void pgut_init(int argc, char **argv);
extern void pgut_atexit_push(pgut_atexit_callback callback, void *userdata);
extern void pgut_atexit_pop(pgut_atexit_callback callback, void *userdata);
extern void pgut_putenv(const char *key, const char *value);

/*
 * Database connections
 */
extern PGconn *pgut_connect(const char *info, YesNo prompt, int elevel);
extern void pgut_disconnect(PGconn *conn);
extern void pgut_disconnect_all(void);
extern PGresult *pgut_execute(PGconn* conn, const char *query, int nParams, const char **params);
PGresult *pgut_execute_elevel(PGconn* conn, const char *query, int nParams, const char **params, int elevel);
extern ExecStatusType pgut_command(PGconn* conn, const char *query, int nParams, const char **params);
extern bool pgut_commit(PGconn *conn);
extern void pgut_rollback(PGconn *conn);
extern bool pgut_send(PGconn* conn, const char *query, int nParams, const char **params);
extern int pgut_wait(int num, PGconn *connections[], struct timeval *timeout);

/*
 * memory allocators
 */
extern void *pgut_malloc(size_t size);
extern void *pgut_realloc(void *p, size_t size);
extern char *pgut_strdup(const char *str);
extern char *strdup_with_len(const char *str, size_t len);
extern char *strdup_trim(const char *str);

#define pgut_new(type)          ((type *) pgut_malloc(sizeof(type)))
#define pgut_newarray(type, n)  ((type *) pgut_malloc(sizeof(type) * (n)))
#define pgut_newvar(type, m, n) ((type *) pgut_malloc(offsetof(type, m) + (n)))
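/*
 * Example (a sketch): the allocators above either succeed or report FATAL,
 * so callers need no NULL checks:
 *
 *    pgut_atexit_item *item = pgut_new(pgut_atexit_item);
 *    char            **args = pgut_newarray(char *, argc);
 */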
/*
 * file operations
 */
extern FILE *pgut_fopen(const char *path, const char *mode);
extern bool pgut_mkdir(const char *path);

/*
 * elog
 */
#define E_PG_CONNECT    (-1)    /* PostgreSQL connection error */
#define E_PG_COMMAND    (-2)    /* PostgreSQL query or command error */

#undef elog
#undef ereport
#define ereport(elevel, rest) \
    (pgut_errstart(elevel) ? (pgut_errfinish rest) : (void) 0)

extern void elog(int elevel, const char *fmt, ...)
__attribute__((format(printf, 2, 3)));
extern const char *format_elevel(int elevel);
extern int parse_elevel(const char *value);
extern int errcode_errno(void);
extern bool log_required(int elevel, int log_min_level);
extern bool pgut_errstart(int elevel);
extern void pgut_errfinish(int dummy, ...);
extern void pgut_error(int elevel, int code, const char *msg, const char *detail);

/*
 * CHECK_FOR_INTERRUPTS
 */
#undef CHECK_FOR_INTERRUPTS
extern void CHECK_FOR_INTERRUPTS(void);

/*
 * Assert
 */
#undef Assert
#undef AssertArg
#undef AssertMacro

#ifdef USE_ASSERT_CHECKING
#define Assert(x)       assert(x)
#define AssertArg(x)    assert(x)
#define AssertMacro(x)  assert(x)
#else
#define Assert(x)       ((void) 0)
#define AssertArg(x)    ((void) 0)
#define AssertMacro(x)  ((void) 0)
#endif

/*
 * StringInfo and string operations
 */
#define STRINGINFO_H

#define StringInfoData          PQExpBufferData
#define StringInfo              PQExpBuffer
#define makeStringInfo          createPQExpBuffer
#define initStringInfo          initPQExpBuffer
#define freeStringInfo          destroyPQExpBuffer
#define termStringInfo          termPQExpBuffer
#define resetStringInfo         resetPQExpBuffer
#define enlargeStringInfo       enlargePQExpBuffer
#define printfStringInfo        printfPQExpBuffer   /* reset + append */
#define appendStringInfo        appendPQExpBuffer
#define appendStringInfoString  appendPQExpBufferStr
#define appendStringInfoChar    appendPQExpBufferChar
#define appendBinaryStringInfo  appendBinaryPQExpBuffer

extern bool pgut_appendStringInfoVA(StringInfo str, const char *fmt, va_list args)
__attribute__((format(printf, 2, 0)));
extern int appendStringInfoFile(StringInfo str, FILE *fp);
extern int appendStringInfoFd(StringInfo str, int fd);

extern bool parse_bool(const char *value, bool *result);
extern bool parse_bool_with_len(const char *value, size_t len, bool *result);
extern bool parse_int32(const char *value, int32 *result);
extern bool parse_uint32(const char *value, uint32 *result);
extern bool parse_int64(const char *value, int64 *result);
extern bool parse_uint64(const char *value, uint64 *result);
extern bool parse_time(const char *value, time_t *time);

#define IsSpace(c)      (isspace((unsigned char)(c)))
#define IsAlpha(c)      (isalpha((unsigned char)(c)))
#define IsAlnum(c)      (isalnum((unsigned char)(c)))
#define IsIdentHead(c)  (IsAlpha(c) || (c) == '_')
#define IsIdentBody(c)  (IsAlnum(c) || (c) == '_')
#define ToLower(c)      (tolower((unsigned char)(c)))
#define ToUpper(c)      (toupper((unsigned char)(c)))
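/*
 * Example (a sketch): because StringInfo is mapped onto PQExpBuffer above,
 * client-side code can use the familiar backend idiom:
 *
 *    StringInfoData  buf;
 *
 *    initStringInfo(&buf);
 *    appendStringInfo(&buf, "column count: %d", ncolumns);
 *    ... use buf.data ...
 *    termStringInfo(&buf);
 */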
/*
 * linked list of string values and helper functions, stolen from pg_dump.
 */
typedef struct SimpleStringListCell
{
    struct SimpleStringListCell *next;
    char        val[1];         /* VARIABLE LENGTH FIELD */
} SimpleStringListCell;

typedef struct SimpleStringList
{
    SimpleStringListCell *head;
    SimpleStringListCell *tail;
} SimpleStringList;

extern void simple_string_list_append(SimpleStringList *list, const char *val);
extern bool simple_string_list_member(SimpleStringList *list, const char *val);
extern size_t simple_string_list_size(SimpleStringList list);

/*
 * socket operations
 */
extern int wait_for_socket(int sock, struct timeval *timeout);
extern int wait_for_sockets(int nfds, fd_set *fds, struct timeval *timeout);

#ifdef WIN32
extern int sleep(unsigned int seconds);
extern int usleep(unsigned int usec);
#endif

#endif   /* PGUT_H */
pg_repack-ver_1.5.0/doc/000077500000000000000000000000001452746007700151055ustar00rootroot00000000000000pg_repack-ver_1.5.0/doc/.gitignore000066400000000000000000000000101452746007700170640ustar00rootroot00000000000000/*.html
pg_repack-ver_1.5.0/doc/Makefile000066400000000000000000000004611452746007700165460ustar00rootroot00000000000000RST2HTML = $(shell which rst2html || which rst2html.py)
RSTOPTS = --stylesheet-path=style.css,html4css1.css --initial-header-level=2
HTML = $(patsubst %.rst,%.html,$(wildcard *.rst))

.PHONY: clean

all : html
html : $(HTML)

%.html: %.rst style.css
	$(RST2HTML) $(RSTOPTS) $< $@

clean:
	rm -f $(HTML)
pg_repack-ver_1.5.0/doc/pg_repack.rst000066400000000000000000000544331452746007700176020ustar00rootroot00000000000000pg_repack -- Reorganize tables in PostgreSQL databases with minimal locks
=========================================================================

.. contents::
    :depth: 1
    :backlinks: none

pg_repack_ is a PostgreSQL extension which lets you remove bloat from
tables and indexes, and optionally restore the physical order of clustered
indexes. Unlike CLUSTER_ and `VACUUM FULL`_ it works online, without
holding an exclusive lock on the processed tables during processing.
pg_repack is efficient to boot, with performance comparable to using
CLUSTER directly.

pg_repack is a fork of the previous pg_reorg_ project. Please check the
`project page`_ for bug report and development information.

You can choose one of the following methods to reorganize:

* Online CLUSTER (ordered by cluster index)
* Ordered by specified columns
* Online VACUUM FULL (packing rows only)
* Rebuild or relocate only the indexes of a table

NOTICE:

* Only superusers can use the utility.
* Target table must have a PRIMARY KEY, or at least a UNIQUE total index on
  a NOT NULL column.

.. _pg_repack: https://reorg.github.io/pg_repack
.. _CLUSTER: http://www.postgresql.org/docs/current/static/sql-cluster.html
.. _VACUUM FULL: VACUUM_
.. _VACUUM: http://www.postgresql.org/docs/current/static/sql-vacuum.html
.. _project page: https://github.com/reorg/pg_repack
.. _pg_reorg: https://github.com/reorg/pg_reorg


Requirements
------------

PostgreSQL versions
    PostgreSQL 9.4, 9.5, 9.6, 10, 11, 12, 13, 14, 15, 16

Disks
    Performing a full-table repack requires free disk space about twice as
    large as the target table(s) and its indexes. For example, if the total
    size of the tables and indexes to be reorganized is 1GB, an additional
    2GB of disk space is required.


Download
--------

You can `download pg_repack`__ from the PGXN website. Unpack the archive
and follow the installation_ instructions.

.. __: http://pgxn.org/dist/pg_repack/
Alternatively you can use the `PGXN Client`_ to download, compile and
install the package; use::

    $ pgxn install pg_repack

Check the `pgxn install documentation`__ for the options available.

.. _PGXN Client: https://pgxn.github.io/pgxnclient/
.. __: https://pgxn.github.io/pgxnclient/usage.html#pgxn-install


Installation
------------

pg_repack can be built with ``make`` on UNIX or Linux. The PGXS build
framework is used automatically. Before building, you might need to
install the PostgreSQL development packages (``postgresql-devel``, etc.)
and add the directory containing ``pg_config`` to your ``$PATH``. Then you
can run::

    $ cd pg_repack
    $ make
    $ sudo make install

You can also use Microsoft Visual C++ 2010 to build the program on
Windows. There are project files in the ``msvc`` folder.

After installation, load the pg_repack extension in the database you want
to process. pg_repack is packaged as an extension, so you can execute::

    $ psql -c "CREATE EXTENSION pg_repack" -d your_database

You can remove pg_repack using ``DROP EXTENSION pg_repack`` or just
dropping the ``repack`` schema.

If you are upgrading from a previous version of pg_repack or pg_reorg,
just drop the old version from the database as explained above and install
the new version.


Usage
-----

::

    pg_repack [OPTION]... [DBNAME]

The following options can be specified in ``OPTIONS``.

Options:
  -a, --all                 repack all databases
  -t, --table=TABLE         repack specific table only
  -I, --parent-table=TABLE  repack specific parent table and its inheritors
  -c, --schema=SCHEMA       repack tables in specific schema only
  -s, --tablespace=TBLSPC   move repacked tables to a new tablespace
  -S, --moveidx             move repacked indexes to *TBLSPC* too
  -o, --order-by=COLUMNS    order by columns instead of cluster keys
  -n, --no-order            do vacuum full instead of cluster
  -N, --dry-run             print what would have been repacked and exit
  -j, --jobs=NUM            Use this many parallel jobs for each table
  -i, --index=INDEX         move only the specified index
  -x, --only-indexes        move only indexes of the specified table
  -T, --wait-timeout=SECS   timeout to cancel other backends on conflict
  -D, --no-kill-backend     don't kill other backends when timed out
  -Z, --no-analyze          don't analyze at end
  -k, --no-superuser-check  skip superuser checks in client
  -C, --exclude-extension   don't repack tables which belong to specific extension
  --error-on-invalid-index  don't repack when invalid index is found
  --switch-threshold        switch tables when that many tuples are left to catchup

Connection options:
  -d, --dbname=DBNAME       database to connect
  -h, --host=HOSTNAME       database server host or socket directory
  -p, --port=PORT           database server port
  -U, --username=USERNAME   user name to connect as
  -w, --no-password         never prompt for password
  -W, --password            force password prompt

Generic options:
  -e, --echo                echo queries
  -E, --elevel=LEVEL        set output message level
  --help                    show this help, then exit
  --version                 output version information, then exit


Reorg Options
^^^^^^^^^^^^^

``-a``, ``--all``
    Attempt to repack all the databases of the cluster. Databases where
    the ``pg_repack`` extension is not installed will be skipped.

``-t TABLE``, ``--table=TABLE``
    Reorganize the specified table(s) only. Multiple tables may be
    reorganized by writing multiple ``-t`` switches. By default, all
    eligible tables in the target databases are reorganized.

``-I TABLE``, ``--parent-table=TABLE``
    Reorganize both the specified table(s) and its inheritors. Multiple
    table hierarchies may be reorganized by writing multiple ``-I``
    switches.

``-c``, ``--schema``
    Repack the tables in the specified schema(s) only. Multiple schemas
    may be repacked by writing multiple ``-c`` switches. May be used in
    conjunction with ``--tablespace`` to move tables to a different
    tablespace.

``-o COLUMNS [,...]``, ``--order-by=COLUMNS [,...]``
    Perform an online CLUSTER ordered by the specified columns.

``-n``, ``--no-order``
    Perform an online VACUUM FULL. Since version 1.2 this is the default
    for non-clustered tables.

``-N``, ``--dry-run``
    List what would be repacked and exit.

``-j``, ``--jobs``
    Create the specified number of extra connections to PostgreSQL, and
    use these extra connections to parallelize the rebuild of indexes on
    each table. Parallel index builds are only supported for full-table
    repacks, not with ``--index`` or ``--only-indexes`` options. If your
    PostgreSQL server has extra cores and disk I/O available, this can be
    a useful way to speed up pg_repack.

``-s TBLSPC``, ``--tablespace=TBLSPC``
    Move the repacked tables to the specified tablespace: essentially an
    online version of ``ALTER TABLE ... SET TABLESPACE``. The tables'
    indexes are left in the original tablespace unless ``--moveidx`` is
    specified too.

``-S``, ``--moveidx``
    Also move the indexes of the repacked tables to the tablespace
    specified by the ``--tablespace`` option.

``-i``, ``--index``
    Repack the specified index(es) only. Multiple indexes may be repacked
    by writing multiple ``-i`` switches. May be used in conjunction with
    ``--tablespace`` to move the index to a different tablespace.

``-x``, ``--only-indexes``
    Repack only the indexes of the specified table(s), which must be
    specified with the ``--table`` or ``--parent-table`` options.

``-T SECS``, ``--wait-timeout=SECS``
    pg_repack needs to take one exclusive lock at the beginning as well as
    one exclusive lock at the end of the repacking process. This setting
    controls how many seconds pg_repack will wait to acquire this lock. If
    the lock cannot be taken after this duration and the
    ``--no-kill-backend`` option is not specified, pg_repack will forcibly
    cancel the conflicting queries, and will fall back to using
    pg_terminate_backend() to disconnect any remaining backends after
    twice this timeout has passed. The default is 60 seconds.

``-D``, ``--no-kill-backend``
    Skip repacking the table if the lock cannot be taken within the
    duration specified by ``--wait-timeout``, instead of cancelling
    conflicting queries. The default is false.

``-Z``, ``--no-analyze``
    Disable ANALYZE after a full-table reorganization. If not specified,
    run ANALYZE after the reorganization.

``-k``, ``--no-superuser-check``
    Skip the superuser checks in the client. This setting is useful for
    using pg_repack on platforms that support running it as non-superusers.

``-C``, ``--exclude-extension``
    Skip tables that belong to the specified extension(s). Some extensions
    may heavily depend on such tables at planning time etc.

``--switch-threshold``
    Switch tables when that many tuples are left in the log table. This
    setting can be used to avoid never being able to catch up with
    write-heavy tables.
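These options can be combined; for instance, the following sketch (table
``foo`` and tablespace ``tbs`` are placeholders) performs a full-table
repack of one table with two parallel index-build jobs, moving the table
and its indexes to a new tablespace::

    $ pg_repack -d test --table foo --jobs 2 --tablespace tbs --moveidx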
Connection Options
^^^^^^^^^^^^^^^^^^

Options to connect to servers. You cannot use ``--all`` and ``--dbname``
or ``--table`` or ``--parent-table`` together.

``-a``, ``--all``
    Reorganize all databases.

``-d DBNAME``, ``--dbname=DBNAME``
    Specifies the name of the database to be reorganized. If this is not
    specified and ``-a`` (or ``--all``) is not used, the database name is
    read from the environment variable PGDATABASE. If that is not set, the
    user name specified for the connection is used.

``-h HOSTNAME``, ``--host=HOSTNAME``
    Specifies the host name of the machine on which the server is running.
    If the value begins with a slash, it is used as the directory for the
    Unix domain socket.

``-p PORT``, ``--port=PORT``
    Specifies the TCP port or local Unix domain socket file extension on
    which the server is listening for connections.

``-U USERNAME``, ``--username=USERNAME``
    User name to connect as.

``-w``, ``--no-password``
    Never issue a password prompt. If the server requires password
    authentication and a password is not available by other means such as
    a ``.pgpass`` file, the connection attempt will fail. This option can
    be useful in batch jobs and scripts where no user is present to enter
    a password.

``-W``, ``--password``
    Force the program to prompt for a password before connecting to a
    database.

    This option is never essential, since the program will automatically
    prompt for a password if the server demands password authentication.
    However, pg_repack will waste a connection attempt finding out that
    the server wants a password. In some cases it is worth typing ``-W``
    to avoid the extra connection attempt.
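For example, the following sketch (host and user names are placeholders)
shows a dry run against a remote server without any password prompt::

    $ pg_repack --dry-run -h db.example.com -p 5432 -U repack_user -w test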
Generic Options
^^^^^^^^^^^^^^^

``-e``, ``--echo``
    Echo commands sent to server.

``-E LEVEL``, ``--elevel=LEVEL``
    Choose the output message level from ``DEBUG``, ``INFO``, ``NOTICE``,
    ``WARNING``, ``ERROR``, ``LOG``, ``FATAL``, and ``PANIC``. The default
    is ``INFO``.

``--help``
    Show usage of the program.

``--version``
    Show the version number of the program.


Environment
-----------

``PGDATABASE``, ``PGHOST``, ``PGPORT``, ``PGUSER``
    Default connection parameters

This utility, like most other PostgreSQL utilities, also uses the
environment variables supported by libpq (see `Environment Variables`__).

.. __: http://www.postgresql.org/docs/current/static/libpq-envars.html


Examples
--------

Perform an online CLUSTER of all the clustered tables in the database
``test``, and perform an online VACUUM FULL of all the non-clustered
tables::

    $ pg_repack test

Perform an online VACUUM FULL on the tables ``foo`` and ``bar`` in the
database ``test`` (an eventual cluster index is ignored)::

    $ pg_repack --no-order --table foo --table bar test

Move all indexes of table ``foo`` to tablespace ``tbs``::

    $ pg_repack -d test --table foo --only-indexes --tablespace tbs

Move the specified index to tablespace ``tbs``::

    $ pg_repack -d test --index idx --tablespace tbs


Diagnostics
-----------

Error messages are reported when pg_repack fails. The following list shows
the cause of errors.

You need to clean up by hand after fatal errors. To clean up, just remove
pg_repack from the database and install it again: execute ``DROP EXTENSION
pg_repack CASCADE`` in the database where the error occurred, followed by
``CREATE EXTENSION pg_repack``.
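For example, using the same placeholder database name as above, the
cleanup can be scripted as::

    $ psql -c "DROP EXTENSION pg_repack CASCADE" -d your_database
    $ psql -c "CREATE EXTENSION pg_repack" -d your_database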
.. class:: diag

INFO: database "db" skipped: pg_repack VER is not installed in the database
    pg_repack is not installed in the database when the ``--all`` option is
    specified.

    Create the pg_repack extension in the database.

ERROR: pg_repack VER is not installed in the database
    pg_repack is not installed in the database specified by ``--dbname``.

    Create the pg_repack extension in the database.

ERROR: program 'pg_repack V1' does not match database library 'pg_repack V2'
    There is a mismatch between the ``pg_repack`` binary and the database
    library (``.so`` or ``.dll``).

    The mismatch could be due to the wrong binary in the ``$PATH`` or the
    wrong database being addressed. Check the program directory and the
    database; if they are what you expected you may need to repeat
    pg_repack installation.

ERROR: extension 'pg_repack V1' required, found 'pg_repack V2'
    The SQL extension found in the database does not match the version
    required by the pg_repack program.

    You should drop the extension from the database and reload it as
    described in the installation_ section.

ERROR: relation "table" must have a primary key or not-null unique keys
    The target table doesn't have a PRIMARY KEY or any UNIQUE constraints
    defined.

    Define a PRIMARY KEY or a UNIQUE constraint on the table.

ERROR: query failed: ERROR: column "col" does not exist
    The target table doesn't have columns specified by ``--order-by``
    option.

    Specify existing columns.

WARNING: the table "tbl" already has a trigger called repack_trigger
    The trigger was probably installed during a previous attempt to run
    pg_repack on the table which was interrupted and for some reason
    failed to clean up the temporary objects.

    You can remove all the temporary objects by dropping and re-creating
    the extension: see the installation_ section for the details.

ERROR: Another pg_repack command may be running on the table. Please try again later.
    There is a chance of deadlock when two concurrent pg_repack commands
    are run on the same table. So, try to run the command after some time.

WARNING: Cannot create index "schema"."index_xxxxx", already exists
    DETAIL: An invalid index may have been left behind by a previous
    pg_repack on the table which was interrupted. Please use DROP INDEX
    "schema"."index_xxxxx" to remove this index and try again.

    A temporary index apparently created by pg_repack has been left
    behind, and we do not want to risk dropping this index ourselves. If
    the index was in fact created by an old pg_repack job which didn't get
    cleaned up, you should just use DROP INDEX and try the repack command
    again.


Restrictions
------------

pg_repack comes with the following restrictions.

Temp tables
^^^^^^^^^^^

pg_repack cannot reorganize temp tables.

GiST indexes
^^^^^^^^^^^^

pg_repack cannot cluster tables by GiST indexes.

DDL commands
^^^^^^^^^^^^

You will not be able to perform DDL commands on the target table(s)
**except** VACUUM or ANALYZE while pg_repack is working. pg_repack will
hold an ACCESS SHARE lock on the target table during a full-table repack,
to enforce this restriction.

If you are using version 1.1.8 or earlier, you must not attempt to perform
any DDL commands on the target table(s) while pg_repack is running. In
many cases pg_repack would fail and rollback correctly, but there were
some cases in these earlier versions which could result in data
corruption.


Details
-------

Full Table Repacks
^^^^^^^^^^^^^^^^^^

To perform a full-table repack, pg_repack will:

1. create a log table to record changes made to the original table
2. add a trigger onto the original table, logging INSERTs, UPDATEs and
   DELETEs into our log table
3. create a new table containing all the rows in the old table
4. build indexes on this new table
5. apply all changes which have accrued in the log table to the new table
6. swap the tables, including indexes and toast tables, using the system
   catalogs
7. drop the original table

pg_repack will only hold an ACCESS EXCLUSIVE lock for a short period
during initial setup (steps 1 and 2 above) and during the final
swap-and-drop phase (steps 6 and 7). For the rest of its time, pg_repack
only needs to hold an ACCESS SHARE lock on the original table, meaning
INSERTs, UPDATEs, and DELETEs may proceed as usual.
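You can observe this from another session: the sketch below (assuming
table ``foo`` in database ``test`` is being repacked) lists the locks
currently held on the table through the standard ``pg_locks`` view::

    $ psql -c "SELECT locktype, mode, granted FROM pg_locks WHERE relation = 'foo'::regclass" test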
Index Only Repacks
^^^^^^^^^^^^^^^^^^

To perform an index-only repack, pg_repack will:

1. create new indexes on the table using CONCURRENTLY matching the
   definitions of the old indexes
2. swap out the old for the new indexes in the catalogs
3. drop the old indexes

Creating indexes concurrently comes with a few caveats, please see `the
documentation`__ for details.

.. __: http://www.postgresql.org/docs/current/static/sql-createindex.html#SQL-CREATEINDEX-CONCURRENTLY


Releases
--------

* pg_repack 1.5.0

  * Added support for PostgreSQL 16
  * Fix possible SQL injection (issue #368)
  * Support longer password length (issue #357)
  * Fixed infinite loop on empty password (issue #354)
  * Added ``--switch-threshold`` option (issue #347)
  * Fixed crash in ``get_order_by()`` using invalid relations (issue #321)
  * Added support for tables that have been previously rewritten with
    `VACUUM FULL` and use storage=plain for all columns (issue #313)
  * More careful locks acquisition (issue #298)

* pg_repack 1.4.8

  * Added support for PostgreSQL 15
  * Fixed --parent-table on declarative partitioned tables (issue #288)
  * Removed connection info from error log (issue #285)

* pg_repack 1.4.7

  * Added support for PostgreSQL 14

* pg_repack 1.4.6

  * Added support for PostgreSQL 13
  * Dropped support for PostgreSQL before 9.4

* pg_repack 1.4.5

  * Added support for PostgreSQL 12
  * Fixed parallel processing for indexes with operators from public schema

* pg_repack 1.4.4

  * Added support for PostgreSQL 11 (issue #181)
  * Remove duplicate password prompt (issue #184)

* pg_repack 1.4.3

  * Fixed possible CVE-2018-1058 attack paths (issue #168)
  * Fixed "unexpected index definition" after CVE-2018-1058 changes in
    PostgreSQL (issue #169)
  * Fixed build with recent Ubuntu packages (issue #179)

* pg_repack 1.4.2

  * added PostgreSQL 10 support (issue #120)
  * fixed error DROP INDEX CONCURRENTLY cannot run inside a transaction
    block (issue #129)

* pg_repack 1.4.1

  * fixed broken ``--order-by`` option (issue #138)

* pg_repack 1.4

  * added support for PostgreSQL 9.6, dropped support for versions before
    9.1
  * use ``AFTER`` trigger to solve concurrency problems with ``INSERT
    CONFLICT`` (issue #106)
  * added ``--no-kill-backend`` option (issue #108)
  * added ``--no-superuser-check`` option (issue #114)
  * added ``--exclude-extension`` option (#97)
  * added ``--parent-table`` option (#117)
  * restore TOAST storage parameters on repacked tables (issue #10)
  * restore columns storage types in repacked tables (issue #94)

* pg_repack 1.3.4

  * grab exclusive lock before dropping original table (issue #81)
  * do not attempt to repack unlogged tables (issue #71)

* pg_repack 1.3.3

  * Added support for PostgreSQL 9.5
  * Fixed possible deadlock when pg_repack command is interrupted (issue #55)
  * Fixed exit code for when pg_repack is invoked with ``--help`` and
    ``--version``
  * Added Japanese language user manual

* pg_repack 1.3.2

  * Fixed to clean up temporary objects when pg_repack command is
    interrupted.
  * Fixed possible crash when pg_repack shared library is loaded alongside
    pg_statsinfo (issue #43).

* pg_repack 1.3.1

  * Added support for PostgreSQL 9.4.
* pg_repack 1.3

  * Added ``--schema`` to repack only the specified schema (issue #20).
  * Added ``--dry-run`` to do a dry run (issue #21).
  * Fixed advisory locking for >2B OID values (issue #30).
  * Avoid possible deadlock when other sessions lock a to-be-repacked
    table (issue #32).
  * Performance improvement for performing sql_pop DELETEs
    many-at-a-time.
  * Attempt to avoid pg_repack taking forever when dealing with a
    constant heavy stream of changes to a table.

* pg_repack 1.2

  * Support PostgreSQL 9.3.
  * Added ``--tablespace`` and ``--moveidx`` options to perform online
    SET TABLESPACE.
  * Added ``--index`` to repack the specified index only.
  * Added ``--only-indexes`` to repack only the indexes of the specified
    table
  * Added ``--jobs`` option for parallel operation.
  * Don't require ``--no-order`` to perform a VACUUM FULL on
    non-clustered tables (pg_repack issue #6).
  * Don't wait for locks held in other databases (pg_repack issue #11).
  * Bugfix: correctly handle key indexes with options such as DESC, NULL
    FIRST/LAST, COLLATE (pg_repack issue #3).
  * Fixed data corruption bug on delete (pg_repack issue #23).
  * More helpful program output and error messages.

* pg_repack 1.1.8

  * Added support for PostgreSQL 9.2.
  * Added support for CREATE EXTENSION on PostgreSQL 9.1 and following.
  * Give user feedback while waiting for transactions to finish
    (pg_reorg issue #5).
  * Bugfix: Allow running on newly promoted streaming replication slaves
    (pg_reorg issue #1).
  * Bugfix: Fix interaction between pg_repack and Slony 2.0/2.1
    (pg_reorg issue #4)
  * Bugfix: Properly escape column names (pg_reorg issue #6).
  * Bugfix: Avoid recreating invalid indexes, or choosing them as key
    (pg_reorg issue #9).
  * Bugfix: Never choose a partial index as primary key (pg_reorg issue #22).

* pg_reorg 1.1.7 (2011-08-07)

  * Bugfix: VIEWs and FUNCTIONs that used a reorganized table with a
    dropped column could be corrupted.
  * Supports PostgreSQL 9.1 and 9.2dev. (but EXTENSION is not yet)


See Also
--------

* `clusterdb <http://www.postgresql.org/docs/current/static/app-clusterdb.html>`__
* `vacuumdb <http://www.postgresql.org/docs/current/static/app-vacuumdb.html>`__
pg_repack-ver_1.5.0/doc/pg_repack_jp.rst000066400000000000000000001472701452746007700202740ustar00rootroot00000000000000..
    pg_repack -- Reorganize tables in PostgreSQL databases with minimal locks
    =========================================================================

pg_repack -- PostgreSQLデータベースのテーブルを最小限のロックで再編成します
=============================================================================

.. contents::
    :depth: 1
    :backlinks: none

..
    pg_repack_ is a PostgreSQL extension which lets you remove bloat from
    tables and indexes, and optionally restore the physical order of
    clustered indexes. Unlike CLUSTER_ and `VACUUM FULL`_ it works online,
    without holding an exclusive lock on the processed tables during
    processing. pg_repack is efficient to boot, with performance comparable
    to using CLUSTER directly.

pg_repack_ はPostgreSQLの拡張の一つで、肥大化したテーブルやインデックスを再編成し、さらに指定したインデックスにしたがってレコードを並び替えることができます。
PostgreSQLの CLUSTER_ や `VACUUM FULL`_ コマンドと違って、pg_repackは処理の間対象テーブルへの排他ロックを保持し続けないため、オンライン中に動作させることができます。
pg_repackはCLUSTERコマンドを直接実行するのと同じくらいの性能で起動することができて効率的です。

..
    pg_repack is a fork of the previous pg_reorg_ project. Please check the
    `project page`_ for bug report and development information.

pg_repack は pg_reorg_ からフォークしたプロジェクトです。
バグ報告や開発情報については `project page`_ を参照してください。

..
You can choose one of the following methods to reorganize: * Online CLUSTER (ordered by cluster index) * Ordered by specified columns * Online VACUUM FULL (packing rows only) * Rebuild or relocate only the indexes of a table pg_repackでは再編成する方法として次のものが選択できます。 * オンラインCLUSTER (cluster index順にレコードを並び替える) * 指定したカラムでレコードを並び替える * オンラインVACUUM FULL (レコードのすきまを詰める) * 指定したテーブルのインデックスだけを再構築、もしくは再配置する .. NOTICE: * Only superusers can use the utility. * Target table must have a PRIMARY KEY, or at least a UNIQUE total index on a NOT NULL column. 注意: * DBのスーパーユーザだけがpg_repackを実行できます * 対象となるテーブルは主キー、もしくはNOT NULL制約を持つカラムへのユニーク制約をもつインデックスが存在している必要があります .. _pg_repack: https://reorg.github.io/pg_repack .. _CLUSTER: http://www.postgresql.jp/document/current/html/sql-cluster.html .. _VACUUM FULL: VACUUM_ .. _VACUUM: http://www.postgresql.jp/document/current/html/sql-vacuum.html .. _project page: https://github.com/reorg/pg_repack .. _pg_reorg: https://github.com/reorg/pg_reorg .. Requirements ------------ PostgreSQL versions PostgreSQL 9.1, 9.2, 9.3, 9.4, 9.5, 9.6, 10 Disks Performing a full-table repack requires free disk space about twice as large as the target table(s) and its indexes. For example, if the total size of the tables and indexes to be reorganized is 1GB, an additional 2GB of disk space is required. 動作環境 --------- PostgreSQL バージョン PostgreSQL 9.1, 9.2, 9.3, 9.4, 9.5, 9.6 ディスク テーブル全体の再編成を行うには、対象となるテーブルと付属するインデックスのおよそ2倍のサイズのディスク空き容量が必要です。例えば、テーブルとインデックスを合わせたサイズが1GBの場合、2GBのディスク領域が必要となります。 .. Download -------- You can `download pg_repack`__ from the PGXN website. Unpack the archive and follow the installation_ instructions. .. __: http://pgxn.org/dist/pg_repack/ Alternatively you can use the `PGXN Client`_ to download, compile and install the package; use:: $ pgxn install pg_repack Check the `pgxn install documentation`__ for the options available. .. _PGXN Client: https://pgxn.github.io/pgxnclient/ .. __: https://pgxn.github.io/pgxnclient/usage.html#pgxn-install ダウンロード ------------ pg_repackは、PGXNのWebサイトから `ダウンロード`__ できます。 ダウンロードしたアーカイブを展開し、以下の手順にしたがって `インストール`_ してください。 .. __: http://pgxn.org/dist/pg_repack/ もしくは、 `PGXN Client`_ を使ってダウンロードからコンパイル、インストールすることもできます。:: $ pgxn install pg_repack 利用可能なオプションについては `pgxn install コマンドのドキュメント`__ を参照してください。 .. _PGXN Client: https://pgxn.github.io/pgxnclient/ .. __: https://pgxn.github.io/pgxnclient/usage.html#pgxn-install .. Installation ------------ pg_repack can be built with ``make`` on UNIX or Linux. The PGXS build framework is used automatically. Before building, you might need to install the PostgreSQL development packages (``postgresql-devel``, etc.) and add the directory containing ``pg_config`` to your ``$PATH``. Then you can run:: $ cd pg_repack $ make $ sudo make install You can also use Microsoft Visual C++ 2010 to build the program on Windows. There are project files in the ``msvc`` folder. After installation, load the pg_repack extension in the database you want to process. On PostgreSQL 9.1 and following pg_repack is packaged as an extension, so you can execute:: $ psql -c "CREATE EXTENSION pg_repack" -d your_database For previous PostgreSQL versions you should load the script ``$SHAREDIR/contrib/pg_repack.sql`` in the database to process; you can get ``$SHAREDIR`` using ``pg_config --sharedir``, e.g. :: $ psql -f "$(pg_config --sharedir)/contrib/pg_repack.sql" -d your_database You can remove pg_repack from a PostgreSQL 9.1 and following database using ``DROP EXTENSION pg_repack``. 
For previous Postgresql versions load the ``$SHAREDIR/contrib/uninstall_pg_repack.sql`` script or just drop the ``repack`` schema. If you are upgrading from a previous version of pg_repack or pg_reorg, just drop the old version from the database as explained above and install the new version. インストール ------------ Unix やLinux上では、pg_repackは ``make`` コマンドでビルドすることができます。 その際、PostgreSQLの拡張向けの構築基盤であるPGXSが自動で利用されます。 ビルトに当たっては、事前にPostgreSQLの開発パッケージ (``postgresql-devel``, etc.)をインストールしておく必要があるかもしれません。 そして、 ``pg_config`` コマンドが存在するディレクトリが ``$PATH`` に追加されている必要があります。 その上で、以下のコマンドを実行します。:: $ cd pg_repack $ make $ sudo make install Windows OS上ではMicrosoft Visual C++ 2010を利用してビルドすることができます。 ``msvc`` ディレクトリ配下にプロジェクトファイルがあります。 インストールを行った後、pg_repack エクステンションを対象のデータベースに登録します。 PostgreSQL 9.1以上のバージョンでは、以下のコマンドで実施できます。 :: $ psql -c "CREATE EXTENSION pg_repack" -d your_database それ以前のPostgreSQLバージョンの場合は、 ``$SHAREDIR/contrib/pg_repack.sql`` スクリプトを対象とするデータベースに対して実施します。 ``$SHAREDIR`` は ``pg_config --sharedir`` コマンドを実行することで確認できます。 :: $ psql -f "$(pg_config --sharedir)/contrib/pg_repack.sql" -d your_database pg_repackの登録を削除するには、PostgreSQL 9.1以上のバージョンでは、``DROP EXTENSION pg_repack`` を対象データベースに実行します。それ以前のPostgreSQLバージョンの場合は、 ``$SHAREDIR/contrib/uninstall_pg_repack.sql`` スクリプトを実行するか、 ``repack`` スキーマを削除します。 pg_repackもしくはpg_reorgの古いバージョンからのアップグレードを行うには、古いバージョンをデータベースから上記の手順で削除し、新しいバージョンを登録します。 .. Usage ----- :: pg_repack [OPTION]... [DBNAME] The following options can be specified in ``OPTIONS``. Options: -a, --all repack all databases -t, --table=TABLE repack specific table only -I, --parent-table=TABLE repack specific parent table and its inheritors -c, --schema=SCHEMA repack tables in specific schema only -s, --tablespace=TBLSPC move repacked tables to a new tablespace -S, --moveidx move repacked indexes to *TBLSPC* too -o, --order-by=COLUMNS order by columns instead of cluster keys -n, --no-order do vacuum full instead of cluster -N, --dry-run print what would have been repacked and exit -j, --jobs=NUM Use this many parallel jobs for each table -i, --index=INDEX move only the specified index -x, --only-indexes move only indexes of the specified table -T, --wait-timeout=SECS timeout to cancel other backends on conflict -D, --no-kill-backend don't kill other backends when timed out -Z, --no-analyze don't analyze at end -k, --no-superuser-check skip superuser checks in client -C, --exclude-extension don't repack tables which belong to specific extension --error-on-invalid-index don't repack when invalid index is found --switch-threshold switch tables when that many tuples are left to catchup Connection options: -d, --dbname=DBNAME database to connect -h, --host=HOSTNAME database server host or socket directory -p, --port=PORT database server port -U, --username=USERNAME user name to connect as -w, --no-password never prompt for password -W, --password force password prompt Generic options: -e, --echo echo queries -E, --elevel=LEVEL set output message level --help show this help, then exit --version output version information, then exit 利用方法 --------- :: pg_repack [OPTION]... 
[DBNAME] OPTIONには以下のものが指定できます。 固有オプション: -a, --all すべてのデータベースに対して実行します -t, --table=TABLE 指定したテーブルに対して実行します -I, --parent-table=TABLE 指定したテーブルとそれを継承する全ての子テーブルに対して実行します -c, --schema=SCHEMA 指定したスキーマに存在するテーブル全てに対して実行します -s, --tablespace=TBLSPC 指定したテーブル空間に再編成後のテーブルを配置します -S, --moveidx -s/--tablespaceで指定したテーブル空間に再編成対象のテーブルに付与されたインデックスも配置します -o, --order-by=COLUMNS 指定したカラムの値順に再編成します -n, --no-order オンラインVACUUM FULL相当の処理を行います -N, --dry-run 実際の処理は行わず、メッセージのみだけ出力します -j, --jobs=NUM 指定した並列度で処理を行います -i, --index=INDEX 指定したインデックスのみ再編成します -x, --only-indexes 指定したテーブルに付与されたインデックスだけを再編成します -T, --wait-timeout=SECS ロック競合している他のトランザクションをキャンセルするまで待機する時間を指定します -D, --no-kill-backend タイムアウト時に他のバックエンドをキャンセルしません -Z, --no-analyze 再編成後にANALYZEを行いません -k, --no-superuser-check 接続ユーザがスーパーユーザかどうかのチェックを行いません 接続オプション: -d, --dbname=DBNAME 接続する対象のデータベースを指定します -h, --host=HOSTNAME 接続する対象のホスト名、もしくはUNIXソケットドメインディレクトリを指定します -p, --port=PORT 接続する対象のデータベース・サーバのポート番号を指定します -U, --username=USERNAME 接続するユーザ名を指定します -w, --no-password パスワードの入力表示を無効化します -W, --password パスワード入力表示を強制的に表示します 一般オプション: -e, --echo サーバに送信するSQLを表示します -E, --elevel=LEVEL ログ出力レベルを指定します --help 使用方法を表示します .. Reorg Options ^^^^^^^^^^^^^ 再編成オプション ---------------- .. ``-a``, ``--all`` Attempt to repack all the databases of the cluster. Databases where the ``pg_repack`` extension is not installed will be skipped. ``-a``, ``--all`` データベースクラスタのすべてのデータベースを再編成します。pg_repackのエクステンションがインストールされていないデータベースはスキップされます。 .. ``-t TABLE``, ``--table=TABLE`` Reorganize the specified table(s) only. Multiple tables may be reorganized by writing multiple ``-t`` switches. By default, all eligible tables in the target databases are reorganized. ``-t TABLE``, ``--table=TABLE`` 指定したテーブルのみを再編成します。 ``-t`` オプションを複数同時に使用することで、複数のテーブルを指定することができます。このオプションを指定しない限り、対象のデータベースに存在するすべてのテーブルを再編成します。 .. ``-I TABLE``, ``--parent-table=TABLE`` Reorganize both the specified table(s) and its inheritors. Multiple table hierarchies may be reorganized by writing multiple ``-I`` switches. ``-I TABLE``, ``--parent-table=TABLE`` 指定したテーブルとその子テーブルのみを再編成します。 ``-I`` オプションを複数同時に使用することで、複数の親テーブルを指定することができます。 .. ``-c``, ``--schema`` Repack the tables in the specified schema(s) only. Multiple schemas may be repacked by writing multiple ``-c`` switches. May be used in conjunction with ``--tablespace`` to move tables to a different tablespace. ``-c``, ``--schema`` 指定したスキーマに存在するテーブルを再編成します。 ``-c`` オプションを複数同時に指定することで、複数のスキーマを指定することができます。 ``--tablespace`` オプションと同時に使用することで、特定のスキーマのテーブルを別のテーブル空間に移動する利用例が挙げられます。 .. ``-o COLUMNS [,...]``, ``--order-by=COLUMNS [,...]`` Perform an online CLUSTER ordered by the specified columns. ``-o COLUMNS [,...]``, ``--order-by=COLUMNS [,...]`` 指定したカラムの値を用いてオンラインCLUSTER処理を実行します。 .. ``-n``, ``--no-order`` Perform an online VACUUM FULL. Since version 1.2 this is the default for non-clustered tables. ``-n``, ``--no-order`` オンラインVACUUM FULL処理を実行します。バージョン1.2から、クラスタキーのないテーブルに対してはこれがデフォルトの挙動になっています。 .. ``-N``, ``--dry-run`` List what would be repacked and exit. ``-N``, ``--dry-run`` 実際の処理は実行せずに、実施する内容についてのメッセージだけを出力します。 .. ``-j``, ``--jobs`` Create the specified number of extra connections to PostgreSQL, and use these extra connections to parallelize the rebuild of indexes on each table. Parallel index builds are only supported for full-table repacks, not with ``--index`` or ``--only-indexes`` options. If your PostgreSQL server has extra cores and disk I/O available, this can be a useful way to speed up pg_repack. 
``-j``, ``--jobs`` 指定した数だけ追加でPostgreSQLへのコネクションを作成し、それらのコネクションを使って並列でインデックス作成処理を行います。並列でのインデックス作成は、テーブル全体を再編成する場合にのみ有効です。 ``--index`` や ``--only-indexes`` オプションとは同時に利用できません。PostgreSQLサーバのCPUコア数およびディスクI/Oに余裕がある場合には、このオプションを利用することでpg_repackの処理を高速化するための有力な手段になりえます。 .. ``-s TBLSPC``, ``--tablespace=TBLSPC`` Move the repacked tables to the specified tablespace: essentially an online version of ``ALTER TABLE ... SET TABLESPACE``. The tables' indexes are left in the original tablespace unless ``--moveidx`` is specified too. ``-s TBLSPC``, ``--tablespace=TBLSPC`` 再編成したテーブルを指定したテーブル空間に移動します。即ち、 ``ALTER TABLE ... SET TABLESPACE`` 相当の処理をオンラインで実施します。 ``--moveidx`` オプションを併用しない限り、再編成したテーブルのインデックスは元のテーブル空間に残されます。 .. ``-S``, ``--moveidx`` Also move the indexes of the repacked tables to the tablespace specified by the ``--tablespace`` option. ``-S``, ``--moveidx`` ``--tablespace`` オプションと併用することで、再編成したテーブルのインデックスも指定したテーブル空間に移動します。 .. ``-i``, ``--index`` Repack the specified index(es) only. Multiple indexes may be repacked by writing multiple ``-i`` switches. May be used in conjunction with ``--tablespace`` to move the index to a different tablespace. ``-i``, ``--index`` 指定したインデックスのみを再編成します。 ``-i`` オプションを複数同時に指定することで、複数のインデックスを指定することができます。 ``--tablespace`` オプションと同時に使用することで、特定のスキーマのテーブルを別のテーブル空間に移動する利用例が挙げられます。 .. ``-x``, ``--only-indexes`` Repack only the indexes of the specified table(s), which must be specified with the ``--table`` or ``--parent-table`` option. ``-x``, ``--only-indexes`` ``--table`` または ``--parent-table`` オプションと併用することで、指定したテーブルのインデックスのみを再編成します。 .. ``-T SECS``, ``--wait-timeout=SECS`` pg_repack needs to take an exclusive lock at the end of the reorganization. This setting controls how many seconds pg_repack will wait to acquire this lock. If the lock cannot be taken after this duration and ``--no-kill-backend`` option is not specified, pg_repack will forcibly cancel the conflicting queries. If you are using PostgreSQL version 8.4 or newer, pg_repack will fall back to using pg_terminate_backend() to disconnect any remaining backends after twice this timeout has passed. The default is 60 seconds. ``-T SECS``, ``--wait-timeout=SECS`` pg_repackは再編成の完了直前に排他ロックを利用します。このオプションは、このロック取得時に何秒間pg_repackが取得を待機するかを指定します。指定した時間経ってもロックが取得できないかつ、``no-kill-backend``\オプションが指定されていない場合、pg_repackは競合するクエリを強制的にキャンセルさせます。PostgreSQL 8.4以上のバージョンを利用している場合、指定した時間の2倍以上経ってもロックが取得できない場合、pg_repackは競合するクエリを実行しているPostgreSQLバックエンドプロセスをpg_terminate_backend()関数により強制的に停止させます。このオプションのデフォルトは60秒です。 .. ``-D``, ``--no-kill-backend`` Skip to repack table if the lock cannot be taken for duration specified ``--wait-timeout``, instead of cancelling conflicting queries. The default is false. ``-D``, ``--no-kill-backend`` ``--wait-timeout``\オプションで指定された時間が経過してもロックが取得できない場合、競合するクエリをキャンセルする代わりに対象テーブルの再編成をスキップします。 .. ``-Z``, ``--no-analyze`` Disable ANALYZE after a full-table reorganization. If not specified, run ANALYZE after the reorganization. ``-Z``, ``--no-analyze`` 再編成終了後にANALYZEを行うことを無効にします。デフォルトでは再編成完了後に統計情報を更新するためANALYZEを実行します。 .. ``-k``, ``--no-superuser-check`` Skip the superuser checks in the client. This setting is useful for using pg_repack on platforms that support running it as non-superusers. ``-k``, ``--no-superuser-check`` 接続ユーザがスーパーユーザかどうかのチェックを行いません。これは、非スーパーユーザのみが利用できる環境でpg_repackを使用するときに有用です。 .. Connection Options ^^^^^^^^^^^^^^^^^^ Options to connect to servers. You cannot use ``--all`` and ``--dbname`` or ``--table`` or ``--parent-table`` together. 
接続オプション --------------- PostgreSQLサーバに接続するためのオプションです。 ``--all`` オプションと同時に ``--dbname`` 、 ``--table`` や ``--parent-table`` を利用することはできません。 .. ``-a``, ``--all`` Reorganize all databases. ``-a``, ``--all`` すべてのデータベースを再編成します。 .. ``-d DBNAME``, ``--dbname=DBNAME`` Specifies the name of the database to be reorganized. If this is not specified and ``-a`` (or ``--all``) is not used, the database name is read from the environment variable PGDATABASE. If that is not set, the user name specified for the connection is used. ``-d DBNAME``, ``--dbname=DBNAME`` 指定したデータベースのみを再編成します。このオプションや ``-a`` ( ``--all`` )オプションを指定しなかった場合、環境変数PGDATABASEで指定されたデータベースを再編成します。PGDATABASEも指定されていない場合、接続に利用するユーザ名と同じ名称のデータベースを再編成します。 .. ``-h HOSTNAME``, ``--host=HOSTNAME`` Specifies the host name of the machine on which the server is running. If the value begins with a slash, it is used as the directory for the Unix domain socket. ``-h HOSTNAME``, ``--host=HOSTNAME`` 指定したホスト名を持つサーバ上のPostgreSQLに接続します。指定した値が ``/`` で始まる場合、Unixドメインソケットが配置されたディレクトリと解釈して接続します。 .. ``-p PORT``, ``--port=PORT`` Specifies the TCP port or local Unix domain socket file extension on which the server is listening for connections. ``-p PORT``, ``--port=PORT`` 指定したポート番号でPostgreSQLサーバに接続します。 .. ``-U USERNAME``, ``--username=USERNAME`` User name to connect as. ``-U USERNAME``, ``--username=USERNAME`` 指定したユーザ名でPostgreSQLサーバに接続します。 .. ``-w``, ``--no-password`` Never issue a password prompt. If the server requires password authentication and a password is not available by other means such as a ``.pgpass`` file, the connection attempt will fail. This option can be useful in batch jobs and scripts where no user is present to enter a password. ``-w``, ``--no-password`` 接続時にパスワード入力プロンプトを表示されないようにします。もし接続先のPostgreSQLサーバがパスワード認証を要求していて、パスワードが``.pgpass``ファイルなどの手段で取得できない場合、pg_repackは接続に失敗します。このオプションはパスワード入力なしで接続できるユーザを用いたバッチ処理やスクリプトにて利用します。 .. ``-W``, ``--password`` Force the program to prompt for a password before connecting to a database. This option is never essential, since the program will automatically prompt for a password if the server demands password authentication. However, pg_repack will waste a connection attempt finding out that the server wants a password. In some cases it is worth typing ``-W`` to avoid the extra connection attempt. ``-W``, ``--password`` 接続時にパスワード入力プロンプトを強制的に表示します。 サーバがパスワード認証を要求する場合、そもそも自動的にパスワード入力が促されるため、このオプションが重要になることはありません。 しかし、サーバにパスワードが必要かどうかを判断するための接続試行を無駄に行います。 こうした余計な接続試行を防ぎたいのであれば、このオプションが利用してください。 .. Generic Options ^^^^^^^^^^^^^^^ 一般オプション -------------- .. ``-e``, ``--echo`` Echo commands sent to server. ``-e``, ``--echo`` サーバに送信するSQLを表示します。 .. ``-E LEVEL``, ``--elevel=LEVEL`` Choose the output message level from ``DEBUG``, ``INFO``, ``NOTICE``, ``WARNING``, ``ERROR``, ``LOG``, ``FATAL``, and ``PANIC``. The default is ``INFO``. ``-E LEVEL``, ``--elevel=LEVEL`` ログ出力レベルを設定します。 ``DEBUG``, ``INFO``. ``NOTICE``, ``WARNING``, ``ERROR``, ``LOG``, ``FATAL``, ``PANIC`` から選択できます。デフォルトは ``INFO`` です。 .. ``--help`` Show usage of the program. ``--help`` 利用方法についての説明を表示します。 .. ``--version`` Show the version number of the program. ``--version`` バージョン情報を表示します。 .. Environment ----------- ``PGDATABASE``, ``PGHOST``, ``PGPORT``, ``PGUSER`` Default connection parameters This utility, like most other PostgreSQL utilities, also uses the environment variables supported by libpq (see `Environment Variables`__). .. 
__: http://www.postgresql.jp/document/current/html/libpq-envars.html 環境変数 --------- ``PGDATABASE``, ``PGHOST``, ``PGPORT``, ``PGUSER`` 接続パラメータのデフォルト値として利用されます。   また、このユーティリティは、他のほとんどの PostgreSQL ユーティリティと同様、libpq でサポートされる環境変数を使用します。詳細については、 `環境変数`__ の項目を参照してください。 .. __: http://www.postgresql.jp/document/current/html/libpq-envars.html .. Examples -------- Perform an online CLUSTER of all the clustered tables in the database ``test``, and perform an online VACUUM FULL of all the non-clustered tables:: $ pg_repack test Perform an online VACUUM FULL on the tables ``foo`` and ``bar`` in the database ``test`` (an eventual cluster index is ignored):: $ pg_repack --no-order --table foo --table bar test Move all indexes of table ``foo`` to tablespace ``tbs``:: $ pg_repack -d test --table foo --only-indexes --tablespace tbs Move the specified index to tablespace ``tbs``:: $ pg_repack -d test --index idx --tablespace tbs 利用例 ------- 以下のコマンドは、 ``test`` データベースのクラスタ可能なテーブル全てに対してオンラインCLUSTERを行い、その他のテーブルに対してオンラインVACUUM FULLを行います。:: $ pg_repack test ``test`` データベースの ``foo`` テーブルと ``bar`` テーブルに対してオンラインVACUUM FULLを実行するには、以下のようにします。 :: $ pg_repack --no-order --table foo --table bar test ``foo`` テーブルのインデックス全てをテーブル空間 ``tbs`` に移動するには、以下のようにします。 :: $ pg_repack -d test --table foo --only-indexes --tablespace tbs インデックス ``idx`` をテーブル空間 ``tbs`` に移動するには、以下のようにします。 :: $ pg_repack -d test --index idx --tablespace tbs .. Diagnostics ----------- トラブルシューティング ---------------------- .. Error messages are reported when pg_repack fails. The following list shows the cause of errors. You need to cleanup by hand after fatal errors. To cleanup, just remove pg_repack from the database and install it again: for PostgreSQL 9.1 and following execute ``DROP EXTENSION pg_repack CASCADE`` in the database where the error occurred, followed by ``CREATE EXTENSION pg_repack``; for previous version load the script ``$SHAREDIR/contrib/uninstall_pg_repack.sql`` into the database where the error occured and then load ``$SHAREDIR/contrib/pg_repack.sql`` again. pg_repackが失敗した場合、エラーメッセージが表示されます。 エラーの原因について以下に列記します。 FATALエラーが発生した場合、手動でクリーンアップを行う必要があります。 クリーンアップするには、pg_repackをデータベースから一度削除し、再度登録するだけです。 PostgreSQL 9.1以降では、 ``DROP EXTENSION pg_repack CASCADE`` をエラーが起きた データベースで実行し、続いて ``CREATE EXTENSION pg_repack`` を実行します。 これより古いバージョンの場合、 ``$SHAREDIR/contrib/uninstall_pg_repack.sql`` スクリプトをエラーが起きたデータベースに対して実行し、その後 ``$SHAREDIR/contrib/pg_repack.sql`` を同様に実行します。 .. INFO: database "db" skipped: pg_repack VER is not installed in the database pg_repack is not installed in the database when the ``--all`` option is specified. Create the pg_repack extension in the database. .. class:: diag INFO: database "db" skipped: pg_repack VER is not installed in the database ``--all`` オプション指定時に、pg_repackがインストールされていない データベースに対して表示されます。 該当のデータベースに対してpg_repackをインストールしてください。 .. ERROR: pg_repack VER is not installed in the database pg_repack is not installed in the database specified by ``--dbname``. Create the pg_repack extension in the database. .. class:: diag ERROR: pg_repack VER is not installed in the database ``--dbname`` オプション指定時に、指定したデータベースにpg_repackが インストールされていない場合に表示されます。 該当のデータベースに対してpg_repackをインストールしてください。 .. ERROR: program 'pg_repack V1' does not match database library 'pg_repack V2' There is a mismatch between the ``pg_repack`` binary and the database library (``.so`` or ``.dll``). The mismatch could be due to the wrong binary in the ``$PATH`` or the wrong database being addressed. 
Check the program directory and the database; if they are what expected you may need to repeat pg_repack installation. .. class:: diag ERROR: program 'pg_repack V1' does not match database library 'pg_repack V2' There is a mismatch between the ``pg_repack`` binary and the database library (``.so`` or ``.dll``). データベースに登録されたpg_repackがバージョン2系であるのに、クライアント側 コマンドのpg_repackのバージョンが1系である場合に表示されます。 ``$PATH`` に誤ったpg_repackのバイナリを指定していたり、接続先のデータベースが 間違っている可能性があります。pg_repackプログラムがインストールされた ディレクトリとデータベースを確認してください。それらが適切である場合、 pg_repackを再インストールしてください。 .. ERROR: extension 'pg_repack V1' required, found extension 'pg_repack V2' The SQL extension found in the database does not match the version required by the pg_repack program. You should drop the extension from the database and reload it as described in the installation_ section. .. class:: diag ERROR: extension 'pg_repack V1' required, found extension 'pg_repack V2' クライアント側のpg_repackがバージョン1系であるのに、データベース側に 登録されたpg_repackがバージョン2系の場合に表示されます。 当該データベースからpg_repackを削除し、 `インストール`_ に従って 再登録してください。 .. ERROR: relation "table" must have a primary key or not-null unique keys The target table doesn't have a PRIMARY KEY or any UNIQUE constraints defined. Define a PRIMARY KEY or a UNIQUE constraint on the table. .. class:: diag ERROR: relation "table" must have a primary key or not-null unique keys 対象のテーブルが主キーもしくはNOT NULLなユニーク制約を持っていない場合に表示されます。 主キーもしくはユニーク制約を定義してください。 .. ERROR: query failed: ERROR: column "col" does not exist The target table doesn't have columns specified by ``--order-by`` option. Specify existing columns. .. class:: diag ERROR: query failed: ERROR: column "col" does not exist 対象のテーブルが ``--order-by`` オプションで指定したカラムを持っていない場合に表示されます。 存在しているカラムを指定してください。 .. WARNING: the table "tbl" already has a trigger called a_repack_trigger The trigger was probably installed during a previous attempt to run pg_repack on the table which was interrupted and for some reason failed to clean up the temporary objects. You can remove all the temporary objects by dropping and re-creating the extension: see the installation_ section for the details. .. class:: diag WARNING: the table "tbl" already has a trigger called repack_trigger 以前に実行したが何らかの理由で中断したか、あるいは失敗したpg_repackコマンドにより、 対象テーブルにpg_repackが利用するトリガが残存している場合に表示されます。 pg_repackを一度削除して、再度登録することで、こうした一時オブジェクトを削除できます。 `インストール`_ を参照してください。 .. WARNING: trigger "trg" conflicting on table "tbl" The target table has a trigger whose name follows ``repack_trigger`` in alphabetical order. The ``repack_trigger`` should be the first AFTER trigger to fire. Please rename your trigger so that it sorts alphabetically before pg_repack's one; you can use:: ALTER TRIGGER aaa_my_trigger ON sometable RENAME TO bbb_my_trigger; .. class:: diag ERROR: Another pg_repack command may be running on the table. Please try again 同じテーブルに複数のpg_repackが同時に実行されている場合に表示されます。 これはデッドロックを引き起こす可能性があるため、片方のpg_repackが終了するのを 待って再度実行してください。 .. WARNING: Cannot create index "schema"."index_xxxxx", already exists DETAIL: An invalid index may have been left behind by a previous pg_repack on the table which was interrupted. Please use DROP INDEX "schema"."index_xxxxx" to remove this index and try again. A temporary index apparently created by pg_repack has been left behind, and we do not want to risk dropping this index ourselves. If the index was in fact created by an old pg_repack job which didn't get cleaned up, you should just use DROP INDEX and try the repack command again. .. 
class:: diag WARNING: Cannot create index "schema"."index_xxxxx", already exists DETAIL: An invalid index may have been left behind by a previous pg_repack on the table which was interrupted. Please use DROP INDEX "schema"."index_xxxxx" to remove this index and try again. 以前に実行したが何らかの理由で中断したか、あるいは失敗したpg_repackコマンドにより、 pg_repackが利用する一時的なインデックスが残存している場合に表示されます。 DROP INDEXコマンドにより該当のインデックスを削除して、pg_repackを再実行してください。 .. Restrictions ------------ pg_repack comes with the following restrictions. 制約 ----- pg_repackには以下の制約があります。 .. Temp tables ^^^^^^^^^^^ pg_repack cannot reorganize temp tables. 一時テーブル ^^^^^^^^^^^^ pg_repackは一時テーブルを再編成できません。 .. GiST indexes ^^^^^^^^^^^^ pg_repack cannot reorganize tables using GiST indexes. GiSTインデックス ^^^^^^^^^^^^^^^^ pg_repackはGiSTインデックスを使ってテーブルを再編成することはできません。 .. DDL commands ^^^^^^^^^^^^ You will not be able to perform DDL commands on the target table(s) **except** VACUUM or ANALYZE while pg_repack is working. pg_repack will hold an ACCESS SHARE lock on the target table during a full-table repack, to enforce this restriction. If you are using version 1.1.8 or earlier, you must not attempt to perform any DDL commands on the target table(s) while pg_repack is running. In many cases pg_repack would fail and roll back correctly, but there were some cases in these earlier versions which could result in data corruption. DDLコマンド ^^^^^^^^^^^^ pg_repackを実行している間、VACUUMもしくはANALYZE以外のDDLコマンドを対象の テーブルに対して実行することはできません。何故ならば、pg_repackは ACCESS SHAREロックを対象テーブルに対して保持しつづけるからです。 バージョン1.1.8もしくはそれ以前のバージョンを使っている場合、あらゆるDDL コマンドをpg_repackが走っているテーブルに対して実行することができません。 大抵はpg_repackが失敗してロールバックが適切に行われますが、古いバージョンでは いくつかのケースでデータ不整合を引き起こす可能性があります。 .. Details ------- 動作詳細 --------- .. Full Table Repacks ^^^^^^^^^^^^^^^^^^ To perform a full-table repack, pg_repack will: 1. create a log table to record changes made to the original table 2. add a trigger onto the original table, logging INSERTs, UPDATEs and DELETEs into our log table 3. create a new table containing all the rows in the old table 4. build indexes on this new table 5. apply all changes which have accrued in the log table to the new table 6. swap the tables, including indexes and toast tables, using the system catalogs 7. drop the original table pg_repack will only hold an ACCESS EXCLUSIVE lock for a short period during initial setup (steps 1 and 2 above) and during the final swap-and-drop phase (steps 6 and 7). For the rest of its time, pg_repack only needs to hold an ACCESS SHARE lock on the original table, meaning INSERTs, UPDATEs, and DELETEs may proceed as usual. テーブル再編成 ^^^^^^^^^^^^^^^ テーブル全体を再編成する場合、pg_repackは以下のように動作します: 1. 対象のテーブルに対して実行される変更を記録するためのログテーブルを作成します 2. 対象のテーブルに、INSERT、UPDATE、DELETEが行われた際にログテーブルに変更内容を記録するトリガを追加します 3. 対象テーブルに含まれるレコードを元に、新しいテーブルを指定した編成順でレコードを並べながら作成します 4. 新しいテーブルに対してインデックスを作成します 5. 再編成中に行われた元のテーブルに対する変更内容をログテーブルから取り出し、新しいテーブルに反映します 6. システムカタログを更新し、元のテーブルと新しいテーブルを入れ替えます。インデックスやトーストテーブルも入れ替えます 7. 元のテーブルを削除します pg_repackは上の手順の中で、始めの1.と2.の時点、および最後の6.と7.の時点で対象のテーブルに対する ACCESS EXCLUSIVEロックを取得します。その他のステップでは、ACCESS SHAREロックを必要とするだけなので、 元のテーブルに対するINSERT, UPDATE, DELETE操作は通常通りに実行されます。 .. Index Only Repacks ^^^^^^^^^^^^^^^^^^ To perform an index-only repack, pg_repack will: 1. create new indexes on the table using CONCURRENTLY matching the definitions of the old indexes 2. swap out the old for the new indexes in the catalogs 3. drop the old indexes Creating indexes concurrently comes with a few caveats, please see `the documentation`__ for details. ..
__: http://www.postgresql.jp/document/current/html/sql-createindex.html#SQL-CREATEINDEX-CONCURRENTLY インデックスのみの再編成 ^^^^^^^^^^^^^^^^^^^^^^^^^ インデックスのみ再編成する場合、pg_repackは以下のように動作します: 1. 元のインデックス定義に沿って、新しいインデックスをCONCURRENTLYオプションを利用して作成します 2. システムカタログを更新し、元のインデックスと新しいインデックスを入れ替えます 3. 元のインデックスを削除します インデックス作成のCONCURRENTLYオプションにはいくつかの注意点があります。 詳細は、 `PostgreSQLドキュメント`__ を参照してください。 .. __: http://www.postgresql.jp/document/current/html/sql-createindex.html#SQL-CREATEINDEX-CONCURRENTLY .. Releases -------- リリースノート --------------- .. * pg_repack 1.4.3 .. * Fixed possible CVE-2018-1058 attack paths (issue #168) .. * Fixed "unexpected index definition" after CVE-2018-1058 changes in .. PostgreSQL (issue #169) .. * Fixed build with recent Ubuntu packages (issue #179) * pg_repack 1.4.3 * CVE-2018-1058を利用した攻撃の可能性を修正しました (issue #168) * PostgreSQLでのCVE-2018-1058の修正により"unexpected index definition"エラーが発生する事象を修正しました (issue #169) * 最近のUbuntuパッケージでビルドが失敗する事象を修正しました (issue #179) .. * pg_repack 1.4.2 .. * added PostgreSQL 10 support (issue #120) .. * fixed error DROP INDEX CONCURRENTLY cannot run inside a transaction block (issue #129) * pg_repack 1.4.2 * PostgreSQL 10をサポートしました (issue #120) * エラー「DROP INDEX CONCURRENTLY cannot run inside a transaction block」が発生する事象を修正しました (issue #129) .. * pg_repack 1.4.1 .. * fixed broken ``--order-by`` option (issue #138) * pg_repack 1.4.1 * 壊れていた ``--order-by`` オプションを修正しました (issue #138) .. * pg_repack 1.4 .. * added support for PostgreSQL 9.6 .. * use ``AFTER`` trigger to solve concurrency problems with ``INSERT .. CONFLICT`` (issue #106) .. * added ``--no-kill-backend`` option (issue #108) .. * added ``--no-superuser-check`` option (issue #114) .. * added ``--exclude-extension`` option (#97) .. * added ``--parent-table`` option (#117) .. * restore TOAST storage parameters on repacked tables (issue #10) .. * restore columns storage types in repacked tables (issue #94) * pg_repack 1.4 * PostgreSQL 9.6をサポートしました * ``INSERT CONFLICT`` を同時実行した際の問題を解決するために、 ``AFTER`` トリガを使うようにしました(issue #106) * ``--no-kill-backend`` オプションを追加しました (issue #108) * ``--no-superuser-check`` オプションを追加しました (issue #114) * ``--exclude-extension`` オプションを追加しました (#97) * ``--parent-table`` オプションを追加しました(#117) * TOASTテーブルの格納オプションを再編成後のテーブルに再設定するようにしました (issue #10) * 列の格納タイプを再編成後のテーブルに再設定するようにしました (issue #94) .. * pg_repack 1.3.4 .. * grab exclusive lock before dropping original table (#81) .. * do not attempt to repack unlogged table (#71) * pg_repack 1.3.4 * 元テーブルを削除する前に排他ロックを取得するようにしました(#81) * Unlogged Tableを再編成対象から外すようにしました (#71) .. * pg_repack 1.3.3 .. * Added support for PostgreSQL 9.5 .. * Fixed possible deadlock when pg_repack command is interrupted (issue #55) .. * Fixed exit code for when pg_repack is invoked with ``--help`` and .. ``--version`` .. * Added Japanese language user manual * pg_repack 1.3.3 * PostgreSQL 9.5をサポートしました * pg_repackが中断されたときにデッドロックが発生する可能性を修正しました (issue #55) * ``--help`` または ``--version`` オプションを指定して実行したときの終了コードを修正しました * 日本語のユーザマニュアルを追加しました .. * pg_repack 1.3.2 .. * Fixed to clean up temporary objects when pg_repack command is interrupted. .. * Fixed possible crash when pg_repack shared library is loaded alongside .. pg_statsinfo (issue #43) * pg_repack 1.3.2 * pg_repackが中断されたときに一時オブジェクトを削除するようにしました * pg_statsinfoと同時にロードされている時にクラッシュする可能性を修正しました .. * pg_repack 1.3.1 .. * Added support for PostgreSQL 9.4. * pg_repack 1.3.1 * PostgreSQL 9.4をサポートしました .. * pg_repack 1.3 .. * Added ``--schema`` to repack only the specified schema (issue #20). ..
* Added ``--dry-run`` to do a dry run (issue #21). .. * Fixed advisory locking for >2B OID values (issue #30). .. * Avoid possible deadlock when other sessions lock a to-be-repacked table (issue #32). .. * Performance improvement for performing sql_pop DELETEs many-at-a-time. .. * Attempt to avoid pg_repack taking forever when dealing with a constant heavy stream of changes to a table. * pg_repack 1.3 * 特定のスキーマのみを再編成対象とする ``--schema`` オプションを追加しました (issue #20) * ドライランのための ``--dry-run`` オプションを追加しました (issue #21) * 勧告的ロックを取得する際のOIDの扱いを修正しました (issue #30) * 再編成予定のテーブルに対して別のセッションがロックを保持している場合にデッドロックが起きないように修正しました (issue #32) * 一度に複数のDELETE操作をsql_popで取り扱う際の性能を改善しました * 常に高負荷の更新が行われているテーブルに対する再編成処理が終わらない事象が起きないように修正しました .. * pg_repack 1.2 * Support PostgreSQL 9.3. * Added ``--tablespace`` and ``--moveidx`` options to perform online SET TABLESPACE. * Added ``--index`` to repack the specified index only. * Added ``--only-indexes`` to repack only the indexes of the specified table. * Added ``--jobs`` option for parallel operation. * Don't require ``--no-order`` to perform a VACUUM FULL on non-clustered tables (pg_repack issue #6). * Don't wait for locks held in other databases (pg_repack issue #11). * Bugfix: correctly handle key indexes with options such as DESC, NULLS FIRST/LAST, COLLATE (pg_repack issue #3). * Fixed data corruption bug on delete (pg_repack issue #23). * More helpful program output and error messages. * pg_repack 1.2 * PostgreSQL 9.3をサポートしました * オンラインSET TABLESPACE文に相当する処理を行うためのオプション ``--tablespace``, ``--moveidx`` を追加しました * 特定のインデックスのみを再編成するためのオプション ``--index`` を追加しました * 特定のテーブルのインデックスをまとめて再編成するオプション ``--only-indexes`` を追加しました * 並列実行のためのオプション ``--jobs`` を追加しました * クラスタキーを持たないテーブルに対してVACUUM FULL相当の処理を行うために ``--no-order`` オプションを明示的に指定しなくてもよいようにしました (pg_repack issue #6) * 他のデータベースにおけるロックを待たないようにしました (pg_repack issue #11) * バグ修正: DESC, NULLS FIRST/LAST, COLLATEを持つインデックスキーを正しく取り扱えるように修正しました (pg_repack issue #3) * 同時に行われる削除操作によってデータ破壊が起こる可能性があったため修正しました (pg_repack issue #23) * 出力メッセージとエラーメッセージを改善しました .. * pg_repack 1.1.8 * Added support for PostgreSQL 9.2. * Added support for CREATE EXTENSION on PostgreSQL 9.1 and following. * Give user feedback while waiting for transactions to finish (pg_reorg issue #5). * Bugfix: Allow running on newly promoted streaming replication slaves (pg_reorg issue #1). * Bugfix: Fix interaction between pg_repack and Slony 2.0/2.1 (pg_reorg issue #4) * Bugfix: Properly escape column names (pg_reorg issue #6). * Bugfix: Avoid recreating invalid indexes, or choosing them as key (pg_reorg issue #9). * Bugfix: Never choose a partial index as primary key (pg_reorg issue #22). * pg_repack 1.1.8 * PostgreSQL 9.2をサポートしました * PostgreSQL 9.1およびそれ以降のバージョンでCREATE EXTENSIONによるインストールが行えるようにしました * 他のトランザクションの終了を待っていることをユーザに通知するようにしました (pg_reorg issue #5) * バグ修正: ストリーミングレプリケーション構成において、新たにマスタに昇格したサーバ上で動作するように修正しました (pg_reorg issue #1) * バグ修正: pg_repackとSlony 2.0/2.1が競合しないように修正しました (pg_reorg issue #4) * バグ修正: カラム名を適切にエスケープするように修正しました (pg_reorg issue #6) * バグ修正: invalidなインデックスを再編成の対象としたり、クラスタキーとして扱うことがないように修正しました (pg_reorg issue #9) * バグ修正: 部分インデックスを主キーとして選択しないように修正しました (pg_reorg issue #22) .. * pg_reorg 1.1.7 (2011-08-07) * Bugfix: VIEWs and FUNCTIONs that used a reorganized table with a dropped column could be corrupted. * Supports PostgreSQL 9.1 and 9.2dev. (but EXTENSION is not yet supported) * pg_reorg 1.1.7 (2011-08-07) * バグ修正: 削除されたカラムを持つテーブルを再編成した際に、そのテーブルに対するビューや関数が壊れないように修正しました * PostgreSQL 9.1および9.2devをサポートしました (EXTENSIONはまだサポートしていません) ..
See Also -------- 関連項目 -------- * `clusterdb <http://www.postgresql.jp/document/current/html/app-clusterdb.html>`__ * `vacuumdb <http://www.postgresql.jp/document/current/html/app-vacuumdb.html>`__ pg_repack-ver_1.5.0/doc/release.rst000066400000000000000000000037521452746007700172660ustar00rootroot00000000000000What to do to release pg_repack =============================== This document lists the operations needed to release a new pg_repack version. The version number in this document is indicated by ``$VER``: it should be a three-part dot-separated version, optionally followed by a pre-release string: ``1.2.0``, ``1.2.1``, ``1.2-dev0``, ``1.2.0-beta1`` are valid version numbers. In order to release the package you will need accounts on GitHub and PGXN with the right privileges: contact Daniele Varrazzo to obtain them. - Set the right version number in ``META.json`` (note: it's in two different places). - Set the right release_status in ``META.json``: ``testing`` or ``stable``. - Commit the above metadata changes. - Create a package running ``make package``. The package will be called ``dist/pg_repack-$VER.zip``. - Verify that the package installs and passes tests with `pgxn client`__:: pgxn install --sudo -- dist/pg_repack-$VER.zip pgxn check dist/pg_repack-$VER.zip (note that ``check`` may require the Postgres bin directory to be added to the path, e.g. ``PATH=$(pg_config --bindir):$PATH``; check the ``install`` log to see where the ``pg_repack`` executable was installed). .. __: https://pgxn.github.io/pgxnclient/ - Push the code changes to GitHub:: git push - Upload the package on http://manager.pgxn.org/. - Check that the uploaded package works as expected; if not, fix and push again:: pgxn install --sudo -- pg_repack pgxn check pg_repack - Create a tag, signed if possible:: git tag -a -s ver_$VER - Push the new tag to GitHub:: git push --tags - Upload the docs by pushing to the repo at http://reorg.github.io/pg_repack/. The operations are roughly:: git clone --recursive git@github.com:reorg/reorg.github.com.git cd reorg.github.com make sm make git commit -a -m "Docs upload for release $VER" git push - Check that the page http://reorg.github.io/pg_repack/ is correct. - Announce the package on pgsql-announce@postgresql.org.
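- As a final sanity check (an optional step, not part of the official
  procedure), verify that the client program, the shared library and the
  SQL extension all agree on the released version. In a database where
  the new package is installed::

      SELECT repack.version(), repack.version_sql();

  should report ``pg_repack $VER`` twice, and ``pg_repack --version``
  should print the same version on the command line.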
pg_repack-ver_1.5.0/doc/style.css000066400000000000000000000031101452746007700167520ustar00rootroot00000000000000body { font-family: Lucida Grande, Verdana, Arial, Helvetica, 'メイリオ', 'Meiryo', 'ヒラギノ角ゴ Pro W3', 'Hiragino Kaku Gothic Pro', 'Osaka', 'MS Pゴシック', sans-serif; color: #202020; } h2, h3, h4, h5, h6 { color: Black; background: none; padding-top: 0.5em; padding-bottom: 0.17em; border-bottom: 1px solid #aaaaaa; } H1 { font-size: x-large; font-family: Lucida Grande,verdana,arial,helvetica,sans-serif; } H2 { font-size: large; font-family: Lucida Grande,verdana,arial,helvetica,sans-serif; } H3 { padding-left: 1em; font-size: medium; font-family: Lucida Grande,verdana,arial,helvetica,sans-serif; } H4 { padding-left: 2em; font-size: small; font-family: Lucida Grande,verdana,arial,helvetica,sans-serif; } H5 { padding-left: 3em; font-size: x-small; font-family: Lucida Grande,verdana,arial,helvetica,sans-serif; } H6 { padding-left: 4em; font-size: xx-small; font-family: Lucida Grande,verdana,arial,helvetica,sans-serif; } pre { font-family: courier,sans-serif; background-color: #FBFBFD; border: 1px dashed #7E7ECB; color: black; line-height: 1.1em; padding: 0.5em; overflow: auto; } li { line-height: 1.4em; } div.contents { float:right; border:thin solid black; background-color: white; padding-top: 0.2em; padding-bottom: 0.2em; padding-left: 1em; padding-right: 1em; margin-left: 0.5em; margin-top: 2.5em !important; } div.contents ul { padding-left: 1em; } dl.diag dt, dl.ddl dt { font-weight: bold; margin-top: 1em; margin-bottom: 0.5em; } pg_repack-ver_1.5.0/lib/000077500000000000000000000000001452746007700151065ustar00rootroot00000000000000pg_repack-ver_1.5.0/lib/.gitignore000066400000000000000000000001021452746007700170670ustar00rootroot00000000000000/.deps/ /pg_repack.sql /pg_repack--[0-9.]*.sql /pg_repack.control pg_repack-ver_1.5.0/lib/Makefile000066400000000000000000000034301452746007700165460ustar00rootroot00000000000000# # pg_repack: lib/Makefile # # Portions Copyright (c) 2008-2012, NIPPON TELEGRAPH AND TELEPHONE CORPORATION # Portions Copyright (c) 2011, Itagaki Takahiro # Portions Copyright (c) 2012-2020, The Reorg Development Team # PG_CONFIG ?= pg_config # version as a number, e.g. 9.1.4 -> 901 VERSION := $(shell $(PG_CONFIG) --version | sed 's/.* \([[:digit:].]\{1,\}\).*/\1/') INTVERSION := $(shell echo $$(($$(echo $(VERSION).0 | sed 's/\([[:digit:]]\{1,\}\)\.\([[:digit:]]\{1,\}\).*/\1*100+\2/')))) EXTENSION = pg_repack MODULE_big = $(EXTENSION) OBJS = repack.o pgut/pgut-spi.o SHLIB_EXPORTS = exports.txt # It is not possible to create tables with OIDs on PostgreSQL 12 or later ifeq ($(shell echo $$(($(INTVERSION) < 1200))),1) RELHASOIDS := relhasoids else RELHASOIDS := false endif # The version number of the program. It should be the same of the library. REPACK_VERSION = $(shell grep '"version":' ../META.json | head -1 \ | sed -e 's/[ ]*"version":[ ]*"\(.*\)",/\1/') PG_CPPFLAGS = -DREPACK_VERSION=$(REPACK_VERSION) DATA_built = pg_repack--$(REPACK_VERSION).sql pg_repack.control USE_PGXS = 1 PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) # remove dependency on libxml2, libxslt, and libpam. # XXX: find a better way to make sure we are linking with libraries # from pg_config which we actually need. 
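# Illustration with hypothetical values: if pg_config reported
# LIBS = -lpgcommon -lpgport -lpam -lxml2 -lxslt -lm, the filter-out
# below would link with just -lpgcommon -lpgport -lm.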
LIBS := $(filter-out -lpam -lxml2 -lxslt, $(LIBS)) pg_repack.sql: pg_repack.sql.in echo "BEGIN;" > $@; \ sed 's,MODULE_PATHNAME,$$libdir/$(MODULE_big),g' $< \ | sed 's,REPACK_VERSION,$(REPACK_VERSION),g' >> $@; \ echo "COMMIT;" >> $@; pg_repack--$(REPACK_VERSION).sql: pg_repack.sql.in sed 's,REPACK_VERSION,$(REPACK_VERSION),g' $< \ | sed 's,relhasoids,$(RELHASOIDS),g'> $@; pg_repack.control: pg_repack.control.in sed 's,REPACK_VERSION,$(REPACK_VERSION),g' $< > $@ pg_repack-ver_1.5.0/lib/exports.txt000066400000000000000000000016611452746007700173570ustar00rootroot00000000000000Pg_magic_func 1 pg_finfo_repack_apply 2 pg_finfo_repack_disable_autovacuum 3 pg_finfo_repack_drop 4 pg_finfo_repack_get_order_by 5 pg_finfo_repack_indexdef 6 pg_finfo_repack_swap 7 pg_finfo_repack_trigger 8 pg_finfo_repack_version 9 pg_finfo_repack_index_swap 10 pg_finfo_repack_get_table_and_inheritors 11 repack_apply 12 repack_disable_autovacuum 13 repack_drop 14 repack_get_order_by 15 repack_indexdef 16 repack_swap 17 repack_trigger 18 repack_version 19 repack_index_swap 20 repack_get_table_and_inheritors 21 pg_repack-ver_1.5.0/lib/pg_repack.control.in000066400000000000000000000002741452746007700210530ustar00rootroot00000000000000# pg_repack extension comment = 'Reorganize tables in PostgreSQL databases with minimal locks' default_version = 'REPACK_VERSION' module_pathname = '$libdir/pg_repack' relocatable = false pg_repack-ver_1.5.0/lib/pg_repack.sql.in000066400000000000000000000300511452746007700201660ustar00rootroot00000000000000/* * pg_repack: lib/pg_repack.sql.in * * Portions Copyright (c) 2008-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION * Portions Copyright (c) 2011, Itagaki Takahiro * Portions Copyright (c) 2012-2020, The Reorg Development Team */ CREATE SCHEMA repack; CREATE FUNCTION repack.version() RETURNS text AS 'MODULE_PATHNAME', 'repack_version' LANGUAGE C IMMUTABLE STRICT; CREATE FUNCTION repack.version_sql() RETURNS text AS $$SELECT 'pg_repack REPACK_VERSION'::text$$ LANGUAGE SQL IMMUTABLE STRICT; -- Always specify search_path to 'pg_catalog' so that we -- always can get schema-qualified relation name CREATE FUNCTION repack.oid2text(oid) RETURNS text AS $$ SELECT textin(regclassout($1)); $$ LANGUAGE sql STABLE STRICT SET search_path to 'pg_catalog'; -- Get a comma-separated column list of the index. -- -- Columns are quoted as literals because they are going to be passed to -- the `repack_trigger` function as text arguments. `repack_trigger` will quote -- them as identifiers later. 
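-- For example (hypothetical index): for a primary key "t_pkey" on
-- columns (id, name),
--   SELECT repack.get_index_columns('t_pkey'::regclass)
-- returns the text: 'id', 'name'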
CREATE FUNCTION repack.get_index_columns(oid) RETURNS text AS $$ SELECT coalesce(string_agg(quote_literal(attname), ', '), '') FROM pg_attribute, (SELECT indrelid, indkey, generate_series(0, indnatts-1) AS i FROM pg_index WHERE indexrelid = $1 ) AS keys WHERE attrelid = indrelid AND attnum = indkey[i]; $$ LANGUAGE sql STABLE STRICT; CREATE FUNCTION repack.get_order_by(oid, oid) RETURNS text AS 'MODULE_PATHNAME', 'repack_get_order_by' LANGUAGE C STABLE STRICT; CREATE FUNCTION repack.create_log_table(oid) RETURNS void AS $$ BEGIN EXECUTE 'CREATE TABLE repack.log_' || $1 || ' (id bigserial PRIMARY KEY,' || ' pk repack.pk_' || $1 || ',' || ' row ' || repack.oid2text($1) || ')'; END $$ LANGUAGE plpgsql; CREATE FUNCTION repack.create_table(oid, name) RETURNS void AS $$ BEGIN EXECUTE 'CREATE TABLE repack.table_' || $1 || ' WITH (' || repack.get_storage_param($1) || ') ' || ' TABLESPACE ' || quote_ident($2) || ' AS SELECT ' || repack.get_columns_for_create_as($1) || ' FROM ONLY ' || repack.oid2text($1) || ' WITH NO DATA'; END $$ LANGUAGE plpgsql; CREATE FUNCTION repack.create_index_type(oid, oid) RETURNS void AS $$ BEGIN EXECUTE repack.get_create_index_type($1, 'repack.pk_' || $2); END $$ LANGUAGE plpgsql; CREATE FUNCTION repack.get_create_index_type(oid, name) RETURNS text AS $$ SELECT 'CREATE TYPE ' || $2 || ' AS (' || coalesce(string_agg(quote_ident(attname) || ' ' || pg_catalog.format_type(atttypid, atttypmod), ', '), '') || ')' FROM pg_attribute, (SELECT indrelid, indkey, generate_series(0, indnatts-1) AS i FROM pg_index WHERE indexrelid = $1 ) AS keys WHERE attrelid = indrelid AND attnum = indkey[i]; $$ LANGUAGE sql STABLE STRICT; CREATE FUNCTION repack.get_create_trigger(relid oid, pkid oid) RETURNS text AS $$ SELECT 'CREATE TRIGGER repack_trigger' || ' AFTER INSERT OR DELETE OR UPDATE ON ' || repack.oid2text($1) || ' FOR EACH ROW EXECUTE PROCEDURE repack.repack_trigger(' || repack.get_index_columns($2) || ')'; $$ LANGUAGE sql STABLE STRICT; CREATE FUNCTION repack.get_enable_trigger(relid oid) RETURNS text AS $$ SELECT 'ALTER TABLE ' || repack.oid2text($1) || ' ENABLE ALWAYS TRIGGER repack_trigger'; $$ LANGUAGE sql STABLE STRICT; CREATE FUNCTION repack.get_assign(oid, text) RETURNS text AS $$ SELECT '(' || coalesce(string_agg(quote_ident(attname), ', '), '') || ') = (' || $2 || '.' || coalesce(string_agg(quote_ident(attname), ', ' || $2 || '.'), '') || ')' FROM (SELECT attname FROM pg_attribute WHERE attrelid = $1 AND attnum > 0 AND NOT attisdropped ORDER BY attnum) tmp; $$ LANGUAGE sql STABLE STRICT; CREATE FUNCTION repack.get_compare_pkey(oid, text) RETURNS text AS $$ SELECT '(' || coalesce(string_agg(quote_ident(attname), ', '), '') || ') = (' || $2 || '.' || coalesce(string_agg(quote_ident(attname), ', ' || $2 || '.'), '') || ')' FROM pg_attribute, (SELECT indrelid, indkey, generate_series(0, indnatts-1) AS i FROM pg_index WHERE indexrelid = $1 ) AS keys WHERE attrelid = indrelid AND attnum = indkey[i]; $$ LANGUAGE sql STABLE STRICT; -- Get a column list for SELECT all columns including dropped ones. -- We use NULLs of integer types for dropped columns (types are not important). 
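-- For example (hypothetical table): for a table created as
-- (a int, b text, c text) whose column b was later dropped, this
-- returns roughly: a,NULL::integer AS "........pg.dropped.2........",c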
CREATE FUNCTION repack.get_columns_for_create_as(oid) RETURNS text AS $$ SELECT coalesce(string_agg(c, ','), '') FROM (SELECT CASE WHEN attisdropped THEN 'NULL::integer AS ' || quote_ident(attname) ELSE quote_ident(attname) END AS c FROM pg_attribute WHERE attrelid = $1 AND attnum > 0 ORDER BY attnum ) AS COL $$ LANGUAGE sql STABLE STRICT; -- Get a SQL text to DROP dropped columns for the table, -- or NULL if it has no dropped columns. CREATE FUNCTION repack.get_drop_columns(oid, text) RETURNS text AS $$ SELECT 'ALTER TABLE ' || $2 || ' ' || array_to_string(dropped_columns, ', ') FROM ( SELECT array_agg('DROP COLUMN ' || quote_ident(attname)) AS dropped_columns FROM ( SELECT * FROM pg_attribute WHERE attrelid = $1 AND attnum > 0 AND attisdropped ORDER BY attnum ) T ) T WHERE array_upper(dropped_columns, 1) > 0 $$ LANGUAGE sql STABLE STRICT; -- Get a comma-separated storage parameter for the table including -- parameters for the corresponding TOAST table. -- Note that since oid setting is always not NULL, this function -- never returns NULL CREATE FUNCTION repack.get_storage_param(oid) RETURNS TEXT AS $$ SELECT string_agg(param, ', ') FROM ( -- table storage parameter SELECT unnest(reloptions) as param FROM pg_class WHERE oid = $1 UNION ALL -- TOAST table storage parameter SELECT ('toast.' || unnest(reloptions)) as param FROM ( SELECT reltoastrelid from pg_class where oid = $1 ) as t, pg_class as c WHERE c.oid = t.reltoastrelid UNION ALL -- table oid SELECT 'oids = ' || CASE WHEN relhasoids THEN 'true' ELSE 'false' END FROM pg_class WHERE oid = $1 ) as t $$ LANGUAGE sql STABLE STRICT; -- GET a SQL text to set column storage option for the table. CREATE FUNCTION repack.get_alter_col_storage(oid) RETURNS text AS $$ SELECT 'ALTER TABLE repack.table_' || $1 || array_to_string(column_storage, ',') FROM ( SELECT array_agg(' ALTER ' || quote_ident(attname) || CASE attstorage WHEN 'p' THEN ' SET STORAGE PLAIN' WHEN 'm' THEN ' SET STORAGE MAIN' WHEN 'e' THEN ' SET STORAGE EXTERNAL' WHEN 'x' THEN ' SET STORAGE EXTENDED' END) AS column_storage FROM ( SELECT * FROM pg_attribute a JOIN pg_type t on t.oid = atttypid JOIN pg_class r on r.oid = a.attrelid JOIN pg_namespace s on s.oid = r.relnamespace WHERE typstorage <> attstorage AND attrelid = $1 AND attnum > 0 AND NOT attisdropped ORDER BY attnum ) T ) T WHERE array_upper(column_storage , 1) > 0 $$ LANGUAGE sql STABLE STRICT; -- includes not only PRIMARY KEYS but also UNIQUE NOT NULL keys CREATE VIEW repack.primary_keys AS SELECT indrelid, min(indexrelid) AS indexrelid FROM (SELECT indrelid, indexrelid FROM pg_index WHERE indisunique AND indisvalid AND indpred IS NULL AND 0 <> ALL(indkey) AND NOT EXISTS( SELECT 1 FROM pg_attribute WHERE attrelid = indrelid AND attnum = ANY(indkey) AND NOT attnotnull) ORDER BY indrelid, indisprimary DESC, indnatts, indkey) tmp GROUP BY indrelid; CREATE VIEW repack.tables AS SELECT repack.oid2text(R.oid) AS relname, R.oid AS relid, R.reltoastrelid AS reltoastrelid, CASE WHEN R.reltoastrelid = 0 THEN 0 ELSE ( SELECT indexrelid FROM pg_index WHERE indrelid = R.reltoastrelid AND indisvalid) END AS reltoastidxid, N.nspname AS schemaname, PK.indexrelid AS pkid, CK.indexrelid AS ckid, 'SELECT repack.create_index_type(' || PK.indexrelid || ',' || R.oid || ')' AS create_pktype, 'SELECT repack.create_log_table(' || R.oid || ')' AS create_log, repack.get_create_trigger(R.oid, PK.indexrelid) AS create_trigger, repack.get_enable_trigger(R.oid) as enable_trigger, 'SELECT repack.create_table($1, $2)' AS create_table, 
coalesce(S.spcname, S2.spcname) AS tablespace_orig, 'INSERT INTO repack.table_' || R.oid || ' SELECT ' || repack.get_columns_for_create_as(R.oid) || ' FROM ONLY ' || repack.oid2text(R.oid) AS copy_data, repack.get_alter_col_storage(R.oid) AS alter_col_storage, repack.get_drop_columns(R.oid, 'repack.table_' || R.oid) AS drop_columns, 'DELETE FROM repack.log_' || R.oid AS delete_log, 'LOCK TABLE ' || repack.oid2text(R.oid) || ' IN ACCESS EXCLUSIVE MODE' AS lock_table, repack.get_order_by(CK.indexrelid, R.oid) AS ckey, 'SELECT * FROM repack.log_' || R.oid || ' ORDER BY id LIMIT $1' AS sql_peek, 'INSERT INTO repack.table_' || R.oid || ' VALUES ($1.*)' AS sql_insert, 'DELETE FROM repack.table_' || R.oid || ' WHERE ' || repack.get_compare_pkey(PK.indexrelid, '$1') AS sql_delete, 'UPDATE repack.table_' || R.oid || ' SET ' || repack.get_assign(R.oid, '$2') || ' WHERE ' || repack.get_compare_pkey(PK.indexrelid, '$1') AS sql_update, 'DELETE FROM repack.log_' || R.oid || ' WHERE id IN (' AS sql_pop FROM pg_class R LEFT JOIN pg_class T ON R.reltoastrelid = T.oid LEFT JOIN repack.primary_keys PK ON R.oid = PK.indrelid LEFT JOIN (SELECT CKI.* FROM pg_index CKI, pg_class CKT WHERE CKI.indisvalid AND CKI.indexrelid = CKT.oid AND CKI.indisclustered AND CKT.relam = 403) CK ON R.oid = CK.indrelid LEFT JOIN pg_namespace N ON N.oid = R.relnamespace LEFT JOIN pg_tablespace S ON S.oid = R.reltablespace CROSS JOIN (SELECT S2.spcname FROM pg_catalog.pg_database D JOIN pg_catalog.pg_tablespace S2 ON S2.oid = D.dattablespace WHERE D.datname = current_database()) S2 WHERE R.relkind = 'r' AND R.relpersistence = 'p' AND N.nspname NOT IN ('pg_catalog', 'information_schema') AND N.nspname NOT LIKE E'pg\\_temp\\_%'; CREATE FUNCTION repack.repack_indexdef(oid, oid, name, bool) RETURNS text AS 'MODULE_PATHNAME', 'repack_indexdef' LANGUAGE C STABLE; CREATE FUNCTION repack.repack_trigger() RETURNS trigger AS 'MODULE_PATHNAME', 'repack_trigger' LANGUAGE C VOLATILE STRICT SECURITY DEFINER SET search_path = pg_catalog, pg_temp; CREATE FUNCTION repack.conflicted_triggers(oid) RETURNS SETOF name AS $$ SELECT tgname FROM pg_trigger WHERE tgrelid = $1 AND tgname = 'repack_trigger' ORDER BY tgname; $$ LANGUAGE sql STABLE STRICT; CREATE FUNCTION repack.disable_autovacuum(regclass) RETURNS void AS 'MODULE_PATHNAME', 'repack_disable_autovacuum' LANGUAGE C VOLATILE STRICT; CREATE FUNCTION repack.repack_apply( sql_peek cstring, sql_insert cstring, sql_delete cstring, sql_update cstring, sql_pop cstring, count integer) RETURNS integer AS 'MODULE_PATHNAME', 'repack_apply' LANGUAGE C VOLATILE; CREATE FUNCTION repack.repack_swap(oid) RETURNS void AS 'MODULE_PATHNAME', 'repack_swap' LANGUAGE C VOLATILE STRICT; CREATE FUNCTION repack.repack_drop(oid, int) RETURNS void AS 'MODULE_PATHNAME', 'repack_drop' LANGUAGE C VOLATILE STRICT; CREATE FUNCTION repack.repack_index_swap(oid) RETURNS void AS 'MODULE_PATHNAME', 'repack_index_swap' LANGUAGE C STABLE STRICT; CREATE FUNCTION repack.get_table_and_inheritors(regclass) RETURNS regclass[] AS 'MODULE_PATHNAME', 'repack_get_table_and_inheritors' LANGUAGE C STABLE STRICT; pg_repack-ver_1.5.0/lib/pgut/000077500000000000000000000000001452746007700160655ustar00rootroot00000000000000pg_repack-ver_1.5.0/lib/pgut/pgut-be.h000066400000000000000000000025451452746007700176070ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pgut-be.h * * Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION * Portions Copyright (c) 2012-2020, The Reorg 
Development Team * *------------------------------------------------------------------------- */ #ifndef PGUT_BE_H #define PGUT_BE_H #include "fmgr.h" #include "utils/tuplestore.h" #ifndef WIN32 #define PGUT_EXPORT #else #define PGUT_EXPORT __declspec(dllexport) /* * PG_MODULE_MAGIC and PG_FUNCTION_INFO_V1 macros seems to be broken. * It uses PGDLLIMPORT, but those objects are not imported from postgres * and exported from the user module. So, it should be always dllexported. */ #undef PG_MODULE_MAGIC #define PG_MODULE_MAGIC \ extern PGUT_EXPORT const Pg_magic_struct *PG_MAGIC_FUNCTION_NAME(void); \ const Pg_magic_struct * \ PG_MAGIC_FUNCTION_NAME(void) \ { \ static const Pg_magic_struct Pg_magic_data = PG_MODULE_MAGIC_DATA; \ return &Pg_magic_data; \ } \ extern int no_such_variable #undef PG_FUNCTION_INFO_V1 #define PG_FUNCTION_INFO_V1(funcname) \ extern PGUT_EXPORT const Pg_finfo_record * CppConcat(pg_finfo_,funcname)(void); \ const Pg_finfo_record * \ CppConcat(pg_finfo_,funcname) (void) \ { \ static const Pg_finfo_record my_finfo = { 1 }; \ return &my_finfo; \ } \ extern int no_such_variable #endif #endif /* PGUT_BE_H */ pg_repack-ver_1.5.0/lib/pgut/pgut-spi.c000066400000000000000000000055031452746007700200040ustar00rootroot00000000000000/*------------------------------------------------------------------------- * pgut-spi.c * * Portions Copyright (c) 2008-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION * Portions Copyright (c) 2011, Itagaki Takahiro * Portions Copyright (c) 2012-2020, The Reorg Development Team *------------------------------------------------------------------------- */ #include "postgres.h" #include "pgut-spi.h" #include "lib/stringinfo.h" #define EXEC_FAILED(ret, expected) \ (((expected) > 0 && (ret) != (expected)) || (ret) < 0) static void appendStringInfoVA_s(StringInfo str, const char *fmt, va_list args) __attribute__((format(printf, 2, 0))); static void termStringInfo(StringInfo str) { if (str && str->data) pfree(str->data); } /* appendStringInfoVA + automatic buffer extension */ static void appendStringInfoVA_s(StringInfo str, const char *fmt, va_list args) { int needed; while ((needed = appendStringInfoVA(str, fmt, args)) > 0) { /* Double the buffer size and try again. */ enlargeStringInfo(str, needed); } } /* simple execute */ void execute(int expected, const char *sql) { int ret = SPI_execute(sql, false, 0); if EXEC_FAILED(ret, expected) elog(ERROR, "query failed: (sql=%s, code=%d, expected=%d)", sql, ret, expected); } /* execute prepared plan */ void execute_plan(int expected, SPIPlanPtr plan, Datum *values, const char *nulls) { int ret = SPI_execute_plan(plan, values, nulls, false, 0); if EXEC_FAILED(ret, expected) elog(ERROR, "query failed: (code=%d, expected=%d)", ret, expected); } /* execute sql with format */ void execute_with_format(int expected, const char *format, ...) { va_list ap; StringInfoData sql; int ret; initStringInfo(&sql); va_start(ap, format); appendStringInfoVA_s(&sql, format, ap); va_end(ap); if (sql.len == 0) elog(WARNING, "execute_with_format(%s)", format); ret = SPI_exec(sql.data, 0); if EXEC_FAILED(ret, expected) elog(ERROR, "query failed: (sql=%s, code=%d, expected=%d)", sql.data, ret, expected); termStringInfo(&sql); } void execute_with_args(int expected, const char *src, int nargs, Oid argtypes[], Datum values[], const bool nulls[]) { int ret; int i; char c_nulls[FUNC_MAX_ARGS]; memset(c_nulls, 0, sizeof(c_nulls)); for (i = 0; i < nargs; i++) c_nulls[i] = (nulls[i] ? 
'n' : ' '); ret = SPI_execute_with_args(src, nargs, argtypes, values, c_nulls, false, 0); if EXEC_FAILED(ret, expected) elog(ERROR, "query failed: (sql=%s, code=%d, expected=%d)", src, ret, expected); } void execute_with_format_args(int expected, const char *format, int nargs, Oid argtypes[], Datum values[], const bool nulls[], ...) { va_list ap; StringInfoData sql; initStringInfo(&sql); va_start(ap, nulls); appendStringInfoVA_s(&sql, format, ap); va_end(ap); execute_with_args(expected, sql.data, nargs, argtypes, values, nulls); termStringInfo(&sql); } pg_repack-ver_1.5.0/lib/pgut/pgut-spi.h000066400000000000000000000020511452746007700200040ustar00rootroot00000000000000/*------------------------------------------------------------------------- * pgut-spi.h * * Portions Copyright (c) 2008-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION * Portions Copyright (c) 2011, Itagaki Takahiro * Portions Copyright (c) 2012-2020, The Reorg Development Team *------------------------------------------------------------------------- */ #ifndef PGUT_SPI_H #define PGUT_SPI_H #include "executor/spi.h" #ifdef _MSC_VER #define __attribute__(x) #endif extern void execute(int expected, const char *sql); extern void execute_plan(int expected, SPIPlanPtr plan, Datum *values, const char *nulls); extern void execute_with_format(int expected, const char *format, ...) __attribute__((format(printf, 2, 3))); extern void execute_with_args(int expected, const char *src, int nargs, Oid argtypes[], Datum values[], const bool nulls[]); extern void execute_with_format_args(int expected, const char *format, int nargs, Oid argtypes[], Datum values[], const bool nulls[], ...) __attribute__((format(printf, 2, 7))); #endif /* PGUT_SPI_H */ pg_repack-ver_1.5.0/lib/repack.c000066400000000000000000001136551452746007700165320ustar00rootroot00000000000000/* * pg_repack: lib/repack.c * * Portions Copyright (c) 2008-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION * Portions Copyright (c) 2011, Itagaki Takahiro * Portions Copyright (c) 2012-2020, The Reorg Development Team */ #include "postgres.h" #include #include "access/genam.h" #include "access/transam.h" #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/namespace.h" /* * heap_open/heap_close was moved to table_open/table_close in 12.0 */ #if PG_VERSION_NUM >= 120000 #include "access/table.h" #endif /* * utils/rel.h no longer includes pg_am.h as of 9.6, so need to include * it explicitly. 
*/ #if PG_VERSION_NUM >= 90600 #include "catalog/pg_am.h" #endif /* * catalog/pg_foo_fn.h headers was merged back into pg_foo.h headers */ #if PG_VERSION_NUM >= 110000 #include "catalog/pg_inherits.h" #else #include "catalog/pg_inherits_fn.h" #endif #include "catalog/pg_namespace.h" #include "catalog/pg_opclass.h" #include "catalog/pg_type.h" #include "commands/tablecmds.h" #include "commands/trigger.h" #include "miscadmin.h" #include "storage/lmgr.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/guc.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/relcache.h" #include "utils/syscache.h" #include "pgut/pgut-spi.h" #include "pgut/pgut-be.h" #include "access/htup_details.h" /* builtins.h was reorganized for 9.5, so now we need this header */ #if PG_VERSION_NUM >= 90500 #include "utils/ruleutils.h" #endif PG_MODULE_MAGIC; extern Datum PGUT_EXPORT repack_version(PG_FUNCTION_ARGS); extern Datum PGUT_EXPORT repack_trigger(PG_FUNCTION_ARGS); extern Datum PGUT_EXPORT repack_apply(PG_FUNCTION_ARGS); extern Datum PGUT_EXPORT repack_get_order_by(PG_FUNCTION_ARGS); extern Datum PGUT_EXPORT repack_indexdef(PG_FUNCTION_ARGS); extern Datum PGUT_EXPORT repack_swap(PG_FUNCTION_ARGS); extern Datum PGUT_EXPORT repack_drop(PG_FUNCTION_ARGS); extern Datum PGUT_EXPORT repack_disable_autovacuum(PG_FUNCTION_ARGS); extern Datum PGUT_EXPORT repack_index_swap(PG_FUNCTION_ARGS); extern Datum PGUT_EXPORT repack_get_table_and_inheritors(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1(repack_version); PG_FUNCTION_INFO_V1(repack_trigger); PG_FUNCTION_INFO_V1(repack_apply); PG_FUNCTION_INFO_V1(repack_get_order_by); PG_FUNCTION_INFO_V1(repack_indexdef); PG_FUNCTION_INFO_V1(repack_swap); PG_FUNCTION_INFO_V1(repack_drop); PG_FUNCTION_INFO_V1(repack_disable_autovacuum); PG_FUNCTION_INFO_V1(repack_index_swap); PG_FUNCTION_INFO_V1(repack_get_table_and_inheritors); static void repack_init(void); static SPIPlanPtr repack_prepare(const char *src, int nargs, Oid *argtypes); static const char *get_quoted_relname(Oid oid); static const char *get_quoted_nspname(Oid oid); static void swap_heap_or_index_files(Oid r1, Oid r2); #define copy_tuple(tuple, desc) \ PointerGetDatum(SPI_returntuple((tuple), (desc))) #define IsToken(c) \ (IS_HIGHBIT_SET((c)) || isalnum((unsigned char) (c)) || (c) == '_') /* check access authority */ static void must_be_superuser(const char *func) { if (!superuser()) elog(ERROR, "must be superuser to use %s function", func); } /* The API of RenameRelationInternal() was changed in 9.2. * Use the RENAME_REL macro for compatibility across versions. */ #if PG_VERSION_NUM < 120000 #define RENAME_REL(relid, newrelname) RenameRelationInternal(relid, newrelname, true); #else #define RENAME_REL(relid, newrelname) RenameRelationInternal(relid, newrelname, true, false); #endif /* * is_index flag was added in 12.0, prefer separate macro for relation and index */ #if PG_VERSION_NUM < 120000 #define RENAME_INDEX(relid, newrelname) RENAME_REL(relid, newrelname); #else #define RENAME_INDEX(relid, newrelname) RenameRelationInternal(relid, newrelname, true, true); #endif #ifdef REPACK_VERSION /* macro trick to stringify a macro expansion */ #define xstr(s) str(s) #define str(s) #s #define LIBRARY_VERSION xstr(REPACK_VERSION) #else #define LIBRARY_VERSION "unknown" #endif Datum repack_version(PG_FUNCTION_ARGS) { return CStringGetTextDatum("pg_repack " LIBRARY_VERSION); } /** * @fn Datum repack_trigger(PG_FUNCTION_ARGS) * @brief Insert a operation log into log-table. 
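 * For illustration, the trigger created by repack.get_create_trigger()
 * looks like this (hypothetical table "tbl" with primary key column "id"):
 *
 *   CREATE TRIGGER repack_trigger
 *   AFTER INSERT OR DELETE OR UPDATE ON tbl
 *   FOR EACH ROW EXECUTE PROCEDURE repack.repack_trigger('id')
 *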
* * repack_trigger(column1, ..., columnN) * * @param column1 A column of the table in primary key/unique index. * ... * @param columnN A column of the table in primary key/unique index. */ Datum repack_trigger(PG_FUNCTION_ARGS) { TriggerData *trigdata = (TriggerData *) fcinfo->context; TupleDesc desc; HeapTuple tuple; Datum values[2]; bool nulls[2] = { 0, 0 }; Oid argtypes[2]; Oid relid; StringInfo sql; /* authority check */ must_be_superuser("repack_trigger"); /* make sure it's called as a trigger at all */ if (!CALLED_AS_TRIGGER(fcinfo) || !TRIGGER_FIRED_AFTER(trigdata->tg_event) || !TRIGGER_FIRED_FOR_ROW(trigdata->tg_event) || trigdata->tg_trigger->tgnargs < 1) elog(ERROR, "repack_trigger: invalid trigger call"); relid = RelationGetRelid(trigdata->tg_relation); /* retrieve parameters */ desc = RelationGetDescr(trigdata->tg_relation); argtypes[0] = argtypes[1] = trigdata->tg_relation->rd_rel->reltype; /* connect to SPI manager */ repack_init(); if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event)) { /* INSERT: (NULL, newtup) */ tuple = trigdata->tg_trigtuple; nulls[0] = true; values[1] = copy_tuple(tuple, desc); } else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) { /* DELETE: (oldtup, NULL) */ tuple = trigdata->tg_trigtuple; values[0] = copy_tuple(tuple, desc); nulls[1] = true; } else { /* UPDATE: (oldtup, newtup) */ tuple = trigdata->tg_newtuple; values[0] = copy_tuple(trigdata->tg_trigtuple, desc); values[1] = copy_tuple(tuple, desc); } /* prepare INSERT query */ sql = makeStringInfo(); appendStringInfo(sql, "INSERT INTO repack.log_%d(pk, row) " "VALUES(CASE WHEN $1 IS NULL THEN NULL ELSE (ROW(", relid); appendStringInfo(sql, "$1.%s", quote_identifier(trigdata->tg_trigger->tgargs[0])); for (int i = 1; i < trigdata->tg_trigger->tgnargs; ++i) appendStringInfo(sql, ", $1.%s", quote_identifier(trigdata->tg_trigger->tgargs[i])); appendStringInfo(sql, ")::repack.pk_%d) END, $2)", relid); /* execute the INSERT query */ execute_with_args(SPI_OK_INSERT, sql->data, 2, argtypes, values, nulls); SPI_finish(); PG_RETURN_POINTER(tuple); } /** * @fn Datum repack_apply(PG_FUNCTION_ARGS) * @brief Apply operations in log table into temp table. * * repack_apply(sql_peek, sql_insert, sql_delete, sql_update, sql_pop, count) * * @param sql_peek SQL to pop tuple from log table. * @param sql_insert SQL to insert into temp table. * @param sql_delete SQL to delete from temp table. * @param sql_update SQL to update temp table. * @param sql_pop SQL to bulk-delete tuples from log table. * @param count Max number of operations, or no count iff <=0. * @retval Number of performed operations. 
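 * For illustration, the queries supplied by the client come from the
 * repack.tables view and look roughly like this for a hypothetical
 * table with OID 16385:
 *   sql_peek:   SELECT * FROM repack.log_16385 ORDER BY id LIMIT $1
 *   sql_insert: INSERT INTO repack.table_16385 VALUES ($1.*)
 *   sql_pop:    DELETE FROM repack.log_16385 WHERE id IN (
 * (this function appends the processed ids and the closing parenthesis
 * to sql_pop below).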
*/ Datum repack_apply(PG_FUNCTION_ARGS) { #define DEFAULT_PEEK_COUNT 1000 const char *sql_peek = PG_GETARG_CSTRING(0); const char *sql_insert = PG_GETARG_CSTRING(1); const char *sql_delete = PG_GETARG_CSTRING(2); const char *sql_update = PG_GETARG_CSTRING(3); /* sql_pop, the fourth arg, will be used in the loop below */ int32 count = PG_GETARG_INT32(5); SPIPlanPtr plan_peek = NULL; SPIPlanPtr plan_insert = NULL; SPIPlanPtr plan_delete = NULL; SPIPlanPtr plan_update = NULL; uint32 n, i; Oid argtypes_peek[1] = { INT4OID }; Datum values_peek[1]; const char nulls_peek[1] = { 0 }; StringInfoData sql_pop; initStringInfo(&sql_pop); /* authority check */ must_be_superuser("repack_apply"); /* connect to SPI manager */ repack_init(); /* peek tuple in log */ plan_peek = repack_prepare(sql_peek, 1, argtypes_peek); for (n = 0;;) { int ntuples; SPITupleTable *tuptable; TupleDesc desc; Oid argtypes[3]; /* id, pk, row */ Datum values[3]; /* id, pk, row */ bool nulls[3]; /* id, pk, row */ /* peek tuple in log */ if (count <= 0) values_peek[0] = Int32GetDatum(DEFAULT_PEEK_COUNT); else values_peek[0] = Int32GetDatum(Min(count - n, DEFAULT_PEEK_COUNT)); execute_plan(SPI_OK_SELECT, plan_peek, values_peek, nulls_peek); if (SPI_processed <= 0) break; /* copy tuptable because we will call other sqls. */ ntuples = SPI_processed; tuptable = SPI_tuptable; desc = tuptable->tupdesc; argtypes[0] = SPI_gettypeid(desc, 1); /* id */ argtypes[1] = SPI_gettypeid(desc, 2); /* pk */ argtypes[2] = SPI_gettypeid(desc, 3); /* row */ resetStringInfo(&sql_pop); appendStringInfoString(&sql_pop, PG_GETARG_CSTRING(4)); for (i = 0; i < ntuples; i++, n++) { HeapTuple tuple; char *pkid; tuple = tuptable->vals[i]; values[0] = SPI_getbinval(tuple, desc, 1, &nulls[0]); values[1] = SPI_getbinval(tuple, desc, 2, &nulls[1]); values[2] = SPI_getbinval(tuple, desc, 3, &nulls[2]); pkid = SPI_getvalue(tuple, desc, 1); Assert(pkid != NULL); if (nulls[1]) { /* INSERT */ if (plan_insert == NULL) plan_insert = repack_prepare(sql_insert, 1, &argtypes[2]); execute_plan(SPI_OK_INSERT, plan_insert, &values[2], (nulls[2] ? "n" : " ")); } else if (nulls[2]) { /* DELETE */ if (plan_delete == NULL) plan_delete = repack_prepare(sql_delete, 1, &argtypes[1]); execute_plan(SPI_OK_DELETE, plan_delete, &values[1], (nulls[1] ? "n" : " ")); } else { /* UPDATE */ if (plan_update == NULL) plan_update = repack_prepare(sql_update, 2, &argtypes[1]); execute_plan(SPI_OK_UPDATE, plan_update, &values[1], (nulls[1] ? "n" : " ")); } /* Add the primary key ID of each row from the log * table we have processed so far to this * DELETE ... IN (...) query string, so we * can delete all the rows we have processed at-once. */ if (i == 0) appendStringInfoString(&sql_pop, pkid); else appendStringInfo(&sql_pop, ",%s", pkid); pfree(pkid); } /* i must be > 0 (and hence we must have some rows to delete) * since SPI_processed > 0 */ Assert(i > 0); appendStringInfoString(&sql_pop, ");"); /* Bulk delete of processed rows from the log table */ execute(SPI_OK_DELETE, sql_pop.data); SPI_freetuptable(tuptable); } SPI_finish(); PG_RETURN_INT32(n); } /* * Parsed CREATE INDEX statement. 
You can rebuild sql using * sprintf(buf, "%s %s ON %s USING %s (%s)%s", * create, index, table type, columns, options) */ typedef struct IndexDef { char *create; /* CREATE INDEX or CREATE UNIQUE INDEX */ char *index; /* index name including schema */ char *table; /* table name including schema */ char *type; /* btree, hash, gist or gin */ char *columns; /* column definition */ char *options; /* options after columns, before TABLESPACE (e.g. COLLATE) */ char *tablespace; /* tablespace if specified */ char *where; /* WHERE content if specified */ } IndexDef; static char * get_relation_name(Oid relid) { Oid nsp = get_rel_namespace(relid); char *nspname; char *strver; int ver; if (!OidIsValid(nsp)) elog(ERROR, "table name not found for OID %u", relid); /* Get the version of the running server (PG_VERSION_NUM would return * the version we compiled the extension with) */ strver = GetConfigOptionByName("server_version_num", NULL #if PG_VERSION_NUM >= 90600 , false /* missing_ok */ #endif ); ver = atoi(strver); pfree(strver); /* * Relation names given by PostgreSQL core are always * qualified since some minor releases. Note that this change * wasn't introduced in PostgreSQL 9.2 and 9.1 releases. */ if ((ver >= 100000 && ver < 100003) || (ver >= 90600 && ver < 90608) || (ver >= 90500 && ver < 90512) || (ver >= 90400 && ver < 90417) || (ver >= 90300 && ver < 90322) || (ver >= 90200 && ver < 90300) || (ver >= 90100 && ver < 90200)) { /* Qualify the name if not visible in search path */ if (RelationIsVisible(relid)) nspname = NULL; else nspname = get_namespace_name(nsp); } else { /* Always qualify the name */ if (OidIsValid(nsp)) nspname = get_namespace_name(nsp); else nspname = NULL; } return quote_qualified_identifier(nspname, get_rel_name(relid)); } static char * parse_error(Oid index) { elog(ERROR, "unexpected index definition: %s", pg_get_indexdef_string(index)); return NULL; } static char * skip_const(Oid index, char *sql, const char *arg1, const char *arg2) { size_t len; if ((arg1 && strncmp(sql, arg1, (len = strlen(arg1))) == 0) || (arg2 && strncmp(sql, arg2, (len = strlen(arg2))) == 0)) { sql[len] = '\0'; return sql + len + 1; } /* error */ return parse_error(index); } static char * skip_until_const(Oid index, char *sql, const char *what) { char *pos; if ((pos = strstr(sql, what))) { size_t len; len = strlen(what); pos[-1] = '\0'; return pos + len + 1; } /* error */ return parse_error(index); } static char * skip_ident(Oid index, char *sql) { while (*sql && isspace((unsigned char) *sql)) sql++; if (*sql == '"') { sql++; for (;;) { char *end = strchr(sql, '"'); if (end == NULL) return parse_error(index); else if (end[1] != '"') { end[1] = '\0'; return end + 2; } else /* escaped quote ("") */ sql = end + 2; } } else { while (*sql && IsToken(*sql)) sql++; *sql = '\0'; return sql + 1; } /* error */ return parse_error(index); } /* * Skip until 'end' character found. The 'end' character is replaced with \0. * Returns the next character of the 'end', or NULL if 'end' is not found. 
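 * Example (illustrative): with sql = "x, y)" and end = ',', the buffer
 * becomes "x" and a pointer to " y)" is returned; parentheses and quoted
 * strings are skipped, so "f(a,b), c" with end = ',' yields "f(a,b)"
 * and " c".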
*/ static char * skip_until(Oid index, char *sql, char end) { char instr = 0; int nopen = 0; for (; *sql && (nopen > 0 || instr != 0 || *sql != end); sql++) { if (instr) { if (sql[0] == instr) { if (sql[1] == instr) sql++; else instr = 0; } else if (sql[0] == '\\') sql++; /* next char is always string */ } else { switch (sql[0]) { case '(': nopen++; break; case ')': nopen--; break; case '\'': case '"': instr = sql[0]; break; } } } if (nopen == 0 && instr == 0) { if (*sql) { *sql = '\0'; return sql + 1; } else return NULL; } /* error */ return parse_error(index); } static void parse_indexdef(IndexDef *stmt, Oid index, Oid table) { char *sql = pg_get_indexdef_string(index); const char *idxname = get_quoted_relname(index); const char *tblname = get_relation_name(table); const char *limit = strchr(sql, '\0'); /* CREATE [UNIQUE] INDEX */ stmt->create = sql; sql = skip_const(index, sql, "CREATE INDEX", "CREATE UNIQUE INDEX"); /* index */ stmt->index = sql; sql = skip_const(index, sql, idxname, NULL); /* ON */ sql = skip_const(index, sql, "ON", NULL); /* table */ stmt->table = sql; sql = skip_const(index, sql, tblname, NULL); /* USING */ sql = skip_const(index, sql, "USING", NULL); /* type */ stmt->type = sql; sql = skip_ident(index, sql); /* (columns) */ if ((sql = strchr(sql, '(')) == NULL) parse_error(index); sql++; stmt->columns = sql; if ((sql = skip_until(index, sql, ')')) == NULL) parse_error(index); /* options */ stmt->options = sql; stmt->tablespace = NULL; stmt->where = NULL; /* Is there a tablespace? Note that apparently there is never, but * if there was one it would appear here. */ if (sql < limit && strstr(sql, "TABLESPACE")) { sql = skip_until_const(index, sql, "TABLESPACE"); stmt->tablespace = sql; sql = skip_ident(index, sql); } /* Note: assuming WHERE is the only clause allowed after TABLESPACE */ if (sql < limit && strstr(sql, "WHERE")) { sql = skip_until_const(index, sql, "WHERE"); stmt->where = sql; } elog(DEBUG2, "indexdef.create = %s", stmt->create); elog(DEBUG2, "indexdef.index = %s", stmt->index); elog(DEBUG2, "indexdef.table = %s", stmt->table); elog(DEBUG2, "indexdef.type = %s", stmt->type); elog(DEBUG2, "indexdef.columns = %s", stmt->columns); elog(DEBUG2, "indexdef.options = %s", stmt->options); elog(DEBUG2, "indexdef.tspace = %s", stmt->tablespace); elog(DEBUG2, "indexdef.where = %s", stmt->where); } /* * Parse the trailing ... [ COLLATE X ] [ DESC ] [ NULLS { FIRST | LAST } ] from an index * definition column. * Returned values point to token. \0's are inserted to separate parsed parts. */ static void parse_indexdef_col(char *token, char **desc, char **nulls, char **collate) { char *pos; /* easier to walk backwards than to parse quotes and escapes... */ if (NULL != (pos = strstr(token, " NULLS FIRST"))) { *nulls = pos + 1; *pos = '\0'; } else if (NULL != (pos = strstr(token, " NULLS LAST"))) { *nulls = pos + 1; *pos = '\0'; } if (NULL != (pos = strstr(token, " DESC"))) { *desc = pos + 1; *pos = '\0'; } if (NULL != (pos = strstr(token, " COLLATE "))) { *collate = pos + 1; *pos = '\0'; } } /** * @fn Datum repack_get_order_by(PG_FUNCTION_ARGS) * @brief Get key definition of the index. * * repack_get_order_by(index, table) * * @param index Oid of target index. * @param table Oid of table of the index. * @retval Create index DDL for temp table. 
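/* For illustration (hypothetical index): for an index on (grp, ts DESC
 * NULLS LAST), repack_get_order_by() returns the text
 * "grp, ts DESC NULLS LAST", which the client uses as an ORDER BY list
 * when filling the new table. */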
*/ Datum repack_get_order_by(PG_FUNCTION_ARGS) { Oid index = PG_GETARG_OID(0); Oid table = PG_GETARG_OID(1); IndexDef stmt; char *token; char *next; StringInfoData str; Relation indexRel = NULL; int nattr; parse_indexdef(&stmt, index, table); /* * FIXME: this is very unreliable implementation but I don't want to * re-implement customized versions of pg_get_indexdef_string... */ initStringInfo(&str); for (nattr = 0, next = stmt.columns; next; nattr++) { char *opcname; char *coldesc = NULL; char *colnulls = NULL; char *colcollate = NULL; token = next; while (isspace((unsigned char) *token)) token++; next = skip_until(index, next, ','); parse_indexdef_col(token, &coldesc, &colnulls, &colcollate); opcname = skip_until(index, token, ' '); appendStringInfoString(&str, token); if (colcollate) appendStringInfo(&str, " %s", colcollate); if (coldesc) appendStringInfo(&str, " %s", coldesc); if (opcname) { /* lookup default operator name from operator class */ Oid opclass; Oid oprid; int16 strategy = BTLessStrategyNumber; Oid opcintype; Oid opfamily; HeapTuple tp; Form_pg_opclass opclassTup; opclass = OpclassnameGetOpcid(BTREE_AM_OID, opcname); /* Retrieve operator information. */ tp = SearchSysCache(CLAOID, ObjectIdGetDatum(opclass), 0, 0, 0); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for opclass %u", opclass); opclassTup = (Form_pg_opclass) GETSTRUCT(tp); opfamily = opclassTup->opcfamily; opcintype = opclassTup->opcintype; ReleaseSysCache(tp); if (!OidIsValid(opcintype)) { if (indexRel == NULL) indexRel = index_open(index, NoLock); #if PG_VERSION_NUM >= 110000 opcintype = TupleDescAttr(RelationGetDescr(indexRel), nattr)->atttypid; #else opcintype = RelationGetDescr(indexRel)->attrs[nattr]->atttypid; #endif } oprid = get_opfamily_member(opfamily, opcintype, opcintype, strategy); if (!OidIsValid(oprid)) elog(ERROR, "missing operator %d(%u,%u) in opfamily %u", strategy, opcintype, opcintype, opfamily); opcname[-1] = '\0'; appendStringInfo(&str, " USING %s", get_opname(oprid)); } if (colnulls) appendStringInfo(&str, " %s", colnulls); if (next) appendStringInfoString(&str, ", "); } if (indexRel != NULL) index_close(indexRel, NoLock); PG_RETURN_TEXT_P(cstring_to_text(str.data)); } /** * @fn Datum repack_indexdef(PG_FUNCTION_ARGS) * @brief Reproduce DDL that create index at the temp table. * * repack_indexdef(index, table) * * @param index Oid of target index. * @param table Oid of table of the index. * @param tablespace Namespace for the index. If NULL keep the original. * @param boolean Whether to use CONCURRENTLY when creating the index. * @retval Create index DDL for temp table. */ Datum repack_indexdef(PG_FUNCTION_ARGS) { Oid index; Oid table; Name tablespace = NULL; IndexDef stmt; StringInfoData str; bool concurrent_index = PG_GETARG_BOOL(3); if (PG_ARGISNULL(0) || PG_ARGISNULL(1)) PG_RETURN_NULL(); index = PG_GETARG_OID(0); table = PG_GETARG_OID(1); if (!PG_ARGISNULL(2)) tablespace = PG_GETARG_NAME(2); parse_indexdef(&stmt, index, table); initStringInfo(&str); if (concurrent_index) appendStringInfo(&str, "%s CONCURRENTLY index_%u ON %s USING %s (%s)%s", stmt.create, index, stmt.table, stmt.type, stmt.columns, stmt.options); else appendStringInfo(&str, "%s index_%u ON repack.table_%u USING %s (%s)%s", stmt.create, index, table, stmt.type, stmt.columns, stmt.options); /* specify the new tablespace or the original one if any */ if (tablespace || stmt.tablespace) appendStringInfo(&str, " TABLESPACE %s", (tablespace ? 
quote_identifier(NameStr(*tablespace)) : stmt.tablespace)); if (stmt.where) appendStringInfo(&str, " WHERE %s", stmt.where); PG_RETURN_TEXT_P(cstring_to_text(str.data)); } static Oid getoid(HeapTuple tuple, TupleDesc desc, int column) { bool isnull; Datum datum = SPI_getbinval(tuple, desc, column, &isnull); return isnull ? InvalidOid : DatumGetObjectId(datum); } /** * @fn Datum repack_swap(PG_FUNCTION_ARGS) * @brief Swapping relfilenode of tables and relation ids of toast tables * and toast indexes. * * repack_swap(oid, relname) * * TODO: remove useless CommandCounterIncrement(). * * @param oid Oid of table of target. * @retval None. */ Datum repack_swap(PG_FUNCTION_ARGS) { Oid oid = PG_GETARG_OID(0); const char *relname = get_quoted_relname(oid); const char *nspname = get_quoted_nspname(oid); Oid argtypes[1] = { OIDOID }; bool nulls[1] = { 0 }; Datum values[1]; SPITupleTable *tuptable; TupleDesc desc; HeapTuple tuple; uint32 records; uint32 i; Oid reltoastrelid1; Oid reltoastidxid1; Oid oid2; Oid reltoastrelid2; Oid reltoastidxid2; Oid owner1; Oid owner2; /* authority check */ must_be_superuser("repack_swap"); /* connect to SPI manager */ repack_init(); /* swap relfilenode and dependencies for tables. */ values[0] = ObjectIdGetDatum(oid); execute_with_args(SPI_OK_SELECT, "SELECT X.reltoastrelid, TX.indexrelid, X.relowner," " Y.oid, Y.reltoastrelid, TY.indexrelid, Y.relowner" " FROM pg_catalog.pg_class X LEFT JOIN pg_catalog.pg_index TX" " ON X.reltoastrelid = TX.indrelid AND TX.indisvalid," " pg_catalog.pg_class Y LEFT JOIN pg_catalog.pg_index TY" " ON Y.reltoastrelid = TY.indrelid AND TY.indisvalid" " WHERE X.oid = $1" " AND Y.oid = ('repack.table_' || X.oid)::regclass", 1, argtypes, values, nulls); tuptable = SPI_tuptable; desc = tuptable->tupdesc; records = SPI_processed; if (records == 0) elog(ERROR, "repack_swap : no swap target"); tuple = tuptable->vals[0]; reltoastrelid1 = getoid(tuple, desc, 1); reltoastidxid1 = getoid(tuple, desc, 2); owner1 = getoid(tuple, desc, 3); oid2 = getoid(tuple, desc, 4); reltoastrelid2 = getoid(tuple, desc, 5); reltoastidxid2 = getoid(tuple, desc, 6); owner2 = getoid(tuple, desc, 7); /* change owner of new relation to original owner */ if (owner1 != owner2) { ATExecChangeOwner(oid2, owner1, true, AccessExclusiveLock); CommandCounterIncrement(); } /* swap tables. */ swap_heap_or_index_files(oid, oid2); CommandCounterIncrement(); /* swap indexes. 
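 * (each repack.index_<oid> was created beforehand from the DDL produced
 * by repack_indexdef(); here its storage files are swapped with the
 * original index's)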
*/ values[0] = ObjectIdGetDatum(oid); execute_with_args(SPI_OK_SELECT, "SELECT X.oid, Y.oid" " FROM pg_catalog.pg_index I," " pg_catalog.pg_class X," " pg_catalog.pg_class Y" " WHERE I.indrelid = $1" " AND I.indexrelid = X.oid" " AND I.indisvalid" " AND Y.oid = ('repack.index_' || X.oid)::regclass", 1, argtypes, values, nulls); tuptable = SPI_tuptable; desc = tuptable->tupdesc; records = SPI_processed; for (i = 0; i < records; i++) { Oid idx1, idx2; tuple = tuptable->vals[i]; idx1 = getoid(tuple, desc, 1); idx2 = getoid(tuple, desc, 2); swap_heap_or_index_files(idx1, idx2); CommandCounterIncrement(); } /* swap names for toast tables and toast indexes */ if (reltoastrelid1 == InvalidOid && reltoastrelid2 == InvalidOid) { if (reltoastidxid1 != InvalidOid || reltoastidxid2 != InvalidOid) elog(ERROR, "repack_swap : unexpected toast relations (T1=%u, I1=%u, T2=%u, I2=%u", reltoastrelid1, reltoastidxid1, reltoastrelid2, reltoastidxid2); /* do nothing */ } else if (reltoastrelid1 == InvalidOid) { char name[NAMEDATALEN]; if (reltoastidxid1 != InvalidOid || reltoastidxid2 == InvalidOid) elog(ERROR, "repack_swap : unexpected toast relations (T1=%u, I1=%u, T2=%u, I2=%u", reltoastrelid1, reltoastidxid1, reltoastrelid2, reltoastidxid2); /* rename Y to X */ snprintf(name, NAMEDATALEN, "pg_toast_%u", oid); RENAME_REL(reltoastrelid2, name); snprintf(name, NAMEDATALEN, "pg_toast_%u_index", oid); RENAME_INDEX(reltoastidxid2, name); CommandCounterIncrement(); } else if (reltoastrelid2 == InvalidOid) { char name[NAMEDATALEN]; if (reltoastidxid1 == InvalidOid || reltoastidxid2 != InvalidOid) elog(ERROR, "repack_swap : unexpected toast relations (T1=%u, I1=%u, T2=%u, I2=%u", reltoastrelid1, reltoastidxid1, reltoastrelid2, reltoastidxid2); /* rename X to Y */ snprintf(name, NAMEDATALEN, "pg_toast_%u", oid2); RENAME_REL(reltoastrelid1, name); snprintf(name, NAMEDATALEN, "pg_toast_%u_index", oid2); RENAME_INDEX(reltoastidxid1, name); CommandCounterIncrement(); } else if (reltoastrelid1 != InvalidOid) { char name[NAMEDATALEN]; int pid = getpid(); /* rename X to TEMP */ snprintf(name, NAMEDATALEN, "pg_toast_pid%d", pid); RENAME_REL(reltoastrelid1, name); snprintf(name, NAMEDATALEN, "pg_toast_pid%d_index", pid); RENAME_INDEX(reltoastidxid1, name); CommandCounterIncrement(); /* rename Y to X */ snprintf(name, NAMEDATALEN, "pg_toast_%u", oid); RENAME_REL(reltoastrelid2, name); snprintf(name, NAMEDATALEN, "pg_toast_%u_index", oid); RENAME_INDEX(reltoastidxid2, name); CommandCounterIncrement(); /* rename TEMP to Y */ snprintf(name, NAMEDATALEN, "pg_toast_%u", oid2); RENAME_REL(reltoastrelid1, name); snprintf(name, NAMEDATALEN, "pg_toast_%u_index", oid2); RENAME_INDEX(reltoastidxid1, name); CommandCounterIncrement(); } /* drop repack trigger */ execute_with_format( SPI_OK_UTILITY, "DROP TRIGGER IF EXISTS repack_trigger ON %s.%s CASCADE", nspname, relname); SPI_finish(); PG_RETURN_VOID(); } /** * @fn Datum repack_drop(PG_FUNCTION_ARGS) * @brief Delete temporarily objects. * * repack_drop(oid, relname) * * @param oid Oid of target table. * @retval None. 
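 * For illustration, a hypothetical final cleanup call is:
 *   SELECT repack.repack_drop('16385', 4);
 * which drops, at most, the log table, the pk type, the repack trigger
 * and the temp table, in that order.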
*/ Datum repack_drop(PG_FUNCTION_ARGS) { Oid oid = PG_GETARG_OID(0); int numobj = PG_GETARG_INT32(1); const char *relname = get_quoted_relname(oid); const char *nspname = get_quoted_nspname(oid); if (!(relname && nspname)) { elog(ERROR, "table name not found for OID %u", oid); PG_RETURN_VOID(); } /* authority check */ must_be_superuser("repack_drop"); /* connect to SPI manager */ repack_init(); /* * To prevent concurrent lockers of the repack target table from causing * deadlocks, take an exclusive lock on it. Consider that the following * commands take exclusive lock on tables log_xxx and the target table * itself when deleting the repack_trigger on it, while concurrent * updaters require row exclusive lock on the target table and in * addition, on the log_xxx table, because of the trigger. * * Consider how a deadlock could occur - if the DROP TABLE repack.log_%u * gets a lock on log_%u table before a concurrent updater could get it * but after the updater has obtained a lock on the target table, the * subsequent DROP TRIGGER ... ON target-table would report a deadlock as * it finds itself waiting for a lock on target-table held by the updater, * which in turn, is waiting for lock on log_%u table. * * Fixes deadlock mentioned in the Github issue #55. * * Skip the lock if we are not going to do anything. * Otherwise, if repack gets accidentally run twice for the same table * at the same time, the second repack, in order to perform * a pointless cleanup, has to wait until the first one completes. * This adds an ACCESS EXCLUSIVE lock request into the queue * making the table effectively inaccessible for any other backend. */ if (numobj > 0) { execute_with_format( SPI_OK_UTILITY, "LOCK TABLE %s.%s IN ACCESS EXCLUSIVE MODE", nspname, relname); } /* drop log table: must be done before dropping the pk type, * since the log table is dependent on the pk type. (That's * why we check numobj > 1 here.) */ if (numobj > 1) { execute_with_format( SPI_OK_UTILITY, "DROP TABLE IF EXISTS repack.log_%u CASCADE", oid); --numobj; } /* drop type for pk type */ if (numobj > 0) { execute_with_format( SPI_OK_UTILITY, "DROP TYPE IF EXISTS repack.pk_%u", oid); --numobj; } /* * drop repack trigger: We have already dropped the trigger in normal * cases, but it can be left on error. */ if (numobj > 0) { execute_with_format( SPI_OK_UTILITY, "DROP TRIGGER IF EXISTS repack_trigger ON %s.%s CASCADE", nspname, relname); --numobj; } /* drop temp table */ if (numobj > 0) { execute_with_format( SPI_OK_UTILITY, "DROP TABLE IF EXISTS repack.table_%u CASCADE", oid); --numobj; } SPI_finish(); PG_RETURN_VOID(); } Datum repack_disable_autovacuum(PG_FUNCTION_ARGS) { Oid oid = PG_GETARG_OID(0); /* connect to SPI manager */ repack_init(); execute_with_format( SPI_OK_UTILITY, "ALTER TABLE %s SET (autovacuum_enabled = off)", get_relation_name(oid)); SPI_finish(); PG_RETURN_VOID(); } /* init SPI */ static void repack_init(void) { int ret = SPI_connect(); if (ret != SPI_OK_CONNECT) elog(ERROR, "pg_repack: SPI_connect returned %d", ret); } /* prepare plan */ static SPIPlanPtr repack_prepare(const char *src, int nargs, Oid *argtypes) { SPIPlanPtr plan = SPI_prepare(src, nargs, argtypes); if (plan == NULL) elog(ERROR, "pg_repack: repack_prepare failed (code=%d, query=%s)", SPI_result, src); return plan; } static const char * get_quoted_relname(Oid oid) { const char *relname = get_rel_name(oid); return (relname ? 
quote_identifier(relname) : NULL); } static const char * get_quoted_nspname(Oid oid) { const char *nspname = get_namespace_name(get_rel_namespace(oid)); return (nspname ? quote_identifier(nspname) : NULL); } /* * This is a copy of swap_relation_files in cluster.c, but it also swaps * relfrozenxid. */ static void swap_heap_or_index_files(Oid r1, Oid r2) { Relation relRelation; HeapTuple reltup1, reltup2; Form_pg_class relform1, relform2; Oid swaptemp; CatalogIndexState indstate; /* We need writable copies of both pg_class tuples. */ #if PG_VERSION_NUM >= 120000 relRelation = table_open(RelationRelationId, RowExclusiveLock); #else relRelation = heap_open(RelationRelationId, RowExclusiveLock); #endif reltup1 = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(r1)); if (!HeapTupleIsValid(reltup1)) elog(ERROR, "cache lookup failed for relation %u", r1); relform1 = (Form_pg_class) GETSTRUCT(reltup1); reltup2 = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(r2)); if (!HeapTupleIsValid(reltup2)) elog(ERROR, "cache lookup failed for relation %u", r2); relform2 = (Form_pg_class) GETSTRUCT(reltup2); Assert(relform1->relkind == relform2->relkind); /* * Actually swap the fields in the two tuples */ swaptemp = relform1->relfilenode; relform1->relfilenode = relform2->relfilenode; relform2->relfilenode = swaptemp; swaptemp = relform1->reltablespace; relform1->reltablespace = relform2->reltablespace; relform2->reltablespace = swaptemp; swaptemp = relform1->reltoastrelid; relform1->reltoastrelid = relform2->reltoastrelid; relform2->reltoastrelid = swaptemp; /* set rel1's frozen Xid to larger one */ if (TransactionIdIsNormal(relform1->relfrozenxid)) { if (TransactionIdFollows(relform1->relfrozenxid, relform2->relfrozenxid)) relform1->relfrozenxid = relform2->relfrozenxid; else relform2->relfrozenxid = relform1->relfrozenxid; } /* swap size statistics too, since new rel has freshly-updated stats */ { int32 swap_pages; float4 swap_tuples; swap_pages = relform1->relpages; relform1->relpages = relform2->relpages; relform2->relpages = swap_pages; swap_tuples = relform1->reltuples; relform1->reltuples = relform2->reltuples; relform2->reltuples = swap_tuples; } indstate = CatalogOpenIndexes(relRelation); #if PG_VERSION_NUM < 100000 /* Update the tuples in pg_class */ simple_heap_update(relRelation, &reltup1->t_self, reltup1); simple_heap_update(relRelation, &reltup2->t_self, reltup2); /* Keep system catalogs current */ CatalogIndexInsert(indstate, reltup1); CatalogIndexInsert(indstate, reltup2); #else CatalogTupleUpdateWithInfo(relRelation, &reltup1->t_self, reltup1, indstate); CatalogTupleUpdateWithInfo(relRelation, &reltup2->t_self, reltup2, indstate); #endif CatalogCloseIndexes(indstate); /* * If we have toast tables associated with the relations being swapped, * change their dependency links to re-associate them with their new * owning relations. Otherwise the wrong one will get dropped ... * * NOTE: it is possible that only one table has a toast table; this can * happen in CLUSTER if there were dropped columns in the old table, and * in ALTER TABLE when adding or changing type of columns. * * NOTE: at present, a TOAST table's only dependency is the one on its * owning table. If more are ever created, we'd need to use something * more selective than deleteDependencyRecordsFor() to get rid of only the * link we want. 
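 *
 * For illustration only, the single INTERNAL pg_depend row being
 * rewritten here can be inspected from SQL (the table name is a
 * hypothetical stand-in):
 *
 *   SELECT objid::regclass, refobjid::regclass, deptype
 *     FROM pg_catalog.pg_depend
 *    WHERE classid = 'pg_class'::regclass
 *      AND objid = (SELECT reltoastrelid FROM pg_catalog.pg_class
 *                   WHERE oid = 'public.mytable'::regclass);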
*/ if (relform1->reltoastrelid || relform2->reltoastrelid) { ObjectAddress baseobject, toastobject; long count; /* Delete old dependencies */ if (relform1->reltoastrelid) { count = deleteDependencyRecordsFor(RelationRelationId, relform1->reltoastrelid, false); if (count != 1) elog(ERROR, "expected one dependency record for TOAST table, found %ld", count); } if (relform2->reltoastrelid) { count = deleteDependencyRecordsFor(RelationRelationId, relform2->reltoastrelid, false); if (count != 1) elog(ERROR, "expected one dependency record for TOAST table, found %ld", count); } /* Register new dependencies */ baseobject.classId = RelationRelationId; baseobject.objectSubId = 0; toastobject.classId = RelationRelationId; toastobject.objectSubId = 0; if (relform1->reltoastrelid) { baseobject.objectId = r1; toastobject.objectId = relform1->reltoastrelid; recordDependencyOn(&toastobject, &baseobject, DEPENDENCY_INTERNAL); } if (relform2->reltoastrelid) { baseobject.objectId = r2; toastobject.objectId = relform2->reltoastrelid; recordDependencyOn(&toastobject, &baseobject, DEPENDENCY_INTERNAL); } } /* * Blow away the old relcache entries now. We need this kluge because * relcache.c keeps a link to the smgr relation for the physical file, and * that will be out of date as soon as we do CommandCounterIncrement. * Whichever of the rels is the second to be cleared during cache * invalidation will have a dangling reference to an already-deleted smgr * relation. Rather than trying to avoid this by ordering operations just * so, it's easiest to not have the relcache entries there at all. * (Fortunately, since one of the entries is local in our transaction, * it's sufficient to clear out our own relcache this way; the problem * cannot arise for other backends when they see our update on the * non-local relation.) */ RelationForgetRelation(r1); RelationForgetRelation(r2); /* Clean up. */ heap_freetuple(reltup1); heap_freetuple(reltup2); #if PG_VERSION_NUM >= 120000 table_close(relRelation, RowExclusiveLock); #else heap_close(relRelation, RowExclusiveLock); #endif } /** * @fn Datum repack_index_swap(PG_FUNCTION_ARGS) * @brief Swap out an original index on a table with the newly-created one. * * repack_index_swap(index) * * @param index Oid of the *original* index. * @retval void */ Datum repack_index_swap(PG_FUNCTION_ARGS) { Oid orig_idx_oid = PG_GETARG_OID(0); Oid repacked_idx_oid; StringInfoData str; SPITupleTable *tuptable; TupleDesc desc; HeapTuple tuple; /* authority check */ must_be_superuser("repack_index_swap"); /* connect to SPI manager */ repack_init(); initStringInfo(&str); /* Find the OID of our new index. */ appendStringInfo(&str, "SELECT oid FROM pg_class " "WHERE relname = 'index_%u' AND relkind = 'i'", orig_idx_oid); execute(SPI_OK_SELECT, str.data); if (SPI_processed != 1) elog(ERROR, "Could not find index 'index_%u', found " UINT64_FORMAT " matches", orig_idx_oid, (uint64) SPI_processed); tuptable = SPI_tuptable; desc = tuptable->tupdesc; tuple = tuptable->vals[0]; repacked_idx_oid = getoid(tuple, desc, 1); swap_heap_or_index_files(orig_idx_oid, repacked_idx_oid); SPI_finish(); PG_RETURN_VOID(); } /** * @fn Datum repack_get_table_and_inheritors(PG_FUNCTION_ARGS) * @brief Return an array containing the Oids of the parent table and its children. * Note that this function does not release relation locks. * * get_table_and_inheritors(table) * * @param table parent table. 
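 *
 * A hedged usage sketch (assumes an inheritance parent named "parent_a",
 * like the one created in the regression tests; the parent itself is
 * included in the result):
 *
 *   SELECT repack.get_table_and_inheritors('parent_a'::regclass);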
* @retval regclass[] */ Datum repack_get_table_and_inheritors(PG_FUNCTION_ARGS) { Oid parent = PG_GETARG_OID(0); List *relations; Datum *relations_array; int relations_array_size; ArrayType *result; ListCell *lc; int i; LockRelationOid(parent, AccessShareLock); /* Check that parent table exists */ if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent))) PG_RETURN_ARRAYTYPE_P(construct_empty_array(OIDOID)); /* Also check that children exist */ relations = find_all_inheritors(parent, AccessShareLock, NULL); relations_array_size = list_length(relations); if (relations_array_size == 0) PG_RETURN_ARRAYTYPE_P(construct_empty_array(OIDOID)); relations_array = palloc(relations_array_size * sizeof(Datum)); i = 0; foreach (lc, relations) relations_array[i++] = ObjectIdGetDatum(lfirst_oid(lc)); result = construct_array(relations_array, relations_array_size, OIDOID, sizeof(Oid), true, 'i'); pfree(relations_array); PG_RETURN_ARRAYTYPE_P(result); } pg_repack-ver_1.5.0/msvc/000077500000000000000000000000001452746007700153105ustar00rootroot00000000000000pg_repack-ver_1.5.0/msvc/bin.2010.vcxproj000066400000000000000000000513101452746007700200560ustar00rootroot00000000000000 8.3 Win32 8.3 x64 8.4 Win32 8.4 x64 9.0 Win32 9.0 x64 {B6B37F22-9E44-4240-AAA0-650D4AC2C1E2} bin Win32Proj Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)/bin/x86/$(Configuration)/bin/ $(SolutionDir)/bin/$(Platform)/$(Configuration)/bin/ $(SolutionDir)/bin/x86/$(Configuration)/bin/ $(SolutionDir)/bin/$(Platform)/$(Configuration)/bin/ $(SolutionDir)/bin/x86/$(Configuration)/bin/ $(SolutionDir)/bin/$(Platform)/$(Configuration)/bin/ $(SolutionDir)/obj/x86/$(Configuration)/$(ProjectName)/ $(SolutionDir)/obj/$(Platform)/$(Configuration)/$(ProjectName)/ $(SolutionDir)/obj/x86/$(Configuration)/$(ProjectName)/ $(SolutionDir)/obj/$(Platform)/$(Configuration)/$(ProjectName)/ $(SolutionDir)/obj/x86/$(Configuration)/$(ProjectName)/ $(SolutionDir)/obj/$(Platform)/$(Configuration)/$(ProjectName)/ false false false false false false C:\Program Files %28x86%29\PostgreSQL\9.0\include\server\port\win32_msvc;C:\Program Files %28x86%29\PostgreSQL\9.0\include\server\port\win32;C:\Program Files %28x86%29\PostgreSQL\9.0\include\server;C:\Program Files %28x86%29\PostgreSQL\9.0\include;C:\Program Files (x86)\PostgreSQL\9.0\include\internal;$(IncludePath) C:\Program Files\PostgreSQL\9.0\include\server\port\win32_msvc;C:\Program Files\PostgreSQL\9.0\include\server\port\win32;C:\Program Files\PostgreSQL\9.0\include\server;C:\Program Files\PostgreSQL\9.0\include;C:\Program Files\PostgreSQL\9.0\include\internal;$(IncludePath) C:\Program Files %28x86%29\PostgreSQL\8.4\include\server\port\win32_msvc;C:\Program Files %28x86%29\PostgreSQL\8.4\include\server\port\win32;C:\Program Files %28x86%29\PostgreSQL\8.4\include\server;C:\Program Files %28x86%29\PostgreSQL\8.4\include;C:\Program Files (x86)\PostgreSQL\8.4\include\internal;$(IncludePath) C:\Program Files %28x86%29\PostgreSQL\8.4\include\server\port\win32_msvc;C:\Program Files %28x86%29\PostgreSQL\8.4\include\server\port\win32;C:\Program Files %28x86%29\PostgreSQL\8.4\include\server;C:\Program Files %28x86%29\PostgreSQL\8.4\include;C:\Program Files (x86)\PostgreSQL\8.4\include\internal;$(IncludePath) C:\Program Files 
%28x86%29\PostgreSQL\8.3\include\server\port\win32_msvc;C:\Program Files %28x86%29\PostgreSQL\8.3\include\server\port\win32;C:\Program Files %28x86%29\PostgreSQL\8.3\include\server;C:\Program Files %28x86%29\PostgreSQL\8.3\include;C:\Program Files (x86)\PostgreSQL\8.3\include\internal;$(IncludePath) C:\Program Files %28x86%29\PostgreSQL\8.4\include\server\port\win32_msvc;C:\Program Files %28x86%29\PostgreSQL\8.4\include\server\port\win32;C:\Program Files %28x86%29\PostgreSQL\8.4\include\server;C:\Program Files %28x86%29\PostgreSQL\8.4\include;C:\Program Files (x86)\PostgreSQL\8.4\include\internal;$(IncludePath) C:\Program Files %28x86%29\PostgreSQL\9.0\lib;$(LibraryPath) C:\Program Files\PostgreSQL\9.0\lib;$(LibraryPath) C:\Program Files %28x86%29\PostgreSQL\8.4\lib;$(LibraryPath) C:\Program Files %28x86%29\PostgreSQL\8.4\lib;$(LibraryPath) C:\Program Files %28x86%29\PostgreSQL\8.3\lib;$(LibraryPath) C:\Program Files %28x86%29\PostgreSQL\8.4\lib;$(LibraryPath) pg_repack pg_repack pg_repack pg_repack pg_repack pg_repack ../include;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) MultiThreadedDLL Level3 false CompileAsC 4005;4996;4018;%(DisableSpecificWarnings) advapi32.lib;ws2_32.lib;libpq.lib;libpgport.lib;libintl-8.lib $(OutDir)/$(TargetFileName) false Console true true MachineX86 ../include;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) MultiThreadedDLL Level3 false CompileAsC 4005;4996;4018;%(DisableSpecificWarnings) advapi32.lib;ws2_32.lib;libpq.lib;libpgport.lib;libintl.lib $(OutDir)/$(TargetFileName) false Console true true ../include;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) MultiThreadedDLL Level3 false CompileAsC 4005;4996;4018;%(DisableSpecificWarnings) advapi32.lib;ws2_32.lib;libpq.lib;libpgport.lib;libintl-8.lib $(OutDir)/$(TargetFileName) false Console true true MachineX86 ../include;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) MultiThreadedDLL Level3 false CompileAsC 4005;4996;4018;%(DisableSpecificWarnings) kernel32.lib;advapi32.lib;ws2_32.lib;libpq.lib;libpgport.lib;libintl-8.lib $(OutDir)/$(TargetFileName) false Console true true ../include;%(AdditionalIncludeDirectories) _USE_32BIT_TIME_T;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) MultiThreadedDLL Level3 false CompileAsC 4005;4996;4018;%(DisableSpecificWarnings) advapi32.lib;ws2_32.lib;libpq.lib;libpgport.lib;libintl-8.lib $(OutDir)/$(TargetFileName) false Console true true MachineX86 ../include;%(AdditionalIncludeDirectories) _USE_32BIT_TIME_T;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) MultiThreadedDLL Level3 false CompileAsC 4005;4996;4018;%(DisableSpecificWarnings) kernel32.lib;advapi32.lib;ws2_32.lib;libpq.lib;libpgport.lib;libintl-8.lib $(OutDir)/$(TargetFileName) false Console true true pg_repack-ver_1.5.0/msvc/bin.2010.vcxproj.filters000066400000000000000000000052011452746007700215230ustar00rootroot00000000000000 {4FC737F1-C7A5-4376-A066-2A32D752A2FF} cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx {93995380-89BD-4b04-88EB-625FBE52EBFB} h;hpp;hxx;hm;inl;inc;xsd {5f942836-8862-4aa3-8573-a6b80a4fbe4f} {e0adaf77-900d-4307-9a5f-6be049d0d93b} {5cf8792b-6351-4f2c-88db-784d7d8a425c} {6a46f2b1-6c15-44c2-bf14-500562f0fc30} doc doc doc doc doc doc regress\expected regress\expected regress\sql regress\sql doc src src src include include 
pg_repack-ver_1.5.0/msvc/bin.vcproj000066400000000000000000000064761452746007700173220ustar00rootroot00000000000000 pg_repack-ver_1.5.0/msvc/lib.2010.vcxproj000066400000000000000000000453651452746007700200710ustar00rootroot00000000000000 8.3 Win32 8.3 x64 8.4 Win32 8.4 x64 9.0 Win32 9.0 x64 {B6B37F22-9E44-4240-AAA0-650D4AC2C2E2} lib Win32Proj DynamicLibrary Unicode true Windows7.1SDK DynamicLibrary Unicode true Windows7.1SDK DynamicLibrary Unicode true Windows7.1SDK DynamicLibrary Unicode true Windows7.1SDK DynamicLibrary Unicode true Windows7.1SDK DynamicLibrary Unicode true Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)/bin/x86/$(Configuration)/lib/ $(SolutionDir)/bin/$(Platform)/$(Configuration)/lib/ $(SolutionDir)/bin/x86/$(Configuration)/lib/ $(SolutionDir)/bin/$(Platform)/$(Configuration)/lib/ $(SolutionDir)/bin/x86/$(Configuration)/lib/ $(SolutionDir)/bin/$(Platform)/$(Configuration)/lib/ $(SolutionDir)/obj/x86/$(Configuration)/$(ProjectName)/ $(SolutionDir)/obj/$(Platform)/$(Configuration)/$(ProjectName)/ $(SolutionDir)/obj/x86/$(Configuration)/$(ProjectName)/ $(SolutionDir)/obj/$(Platform)/$(Configuration)/$(ProjectName)/ $(SolutionDir)/obj/x86/$(Configuration)/$(ProjectName)/ $(SolutionDir)/obj/$(Platform)/$(Configuration)/$(ProjectName)/ false false false false false false C:\Program Files %28x86%29\PostgreSQL\9.0\include\server\port\win32_msvc;C:\Program Files %28x86%29\PostgreSQL\9.0\include\server\port\win32;C:\Program Files %28x86%29\PostgreSQL\9.0\include\server;C:\Program Files %28x86%29\PostgreSQL\9.0\include;$(IncludePath) C:\Program Files\PostgreSQL\9.0\include\server\port\win32_msvc;C:\Program Files\PostgreSQL\9.0\include\server\port\win32;C:\Program Files\PostgreSQL\9.0\include\server;C:\Program Files\PostgreSQL\9.0\include;$(IncludePath) C:\Program Files %28x86%29\PostgreSQL\8.4\include\server\port\win32_msvc;C:\Program Files %28x86%29\PostgreSQL\8.4\include\server\port\win32;C:\Program Files %28x86%29\PostgreSQL\8.4\include\server;C:\Program Files %28x86%29\PostgreSQL\8.4\include;$(IncludePath) C:\Program Files %28x86%29\PostgreSQL\8.4\include\server\port\win32_msvc;C:\Program Files %28x86%29\PostgreSQL\8.4\include\server\port\win32;C:\Program Files %28x86%29\PostgreSQL\8.4\include\server;C:\Program Files %28x86%29\PostgreSQL\8.4\include;$(IncludePath) C:\Program Files %28x86%29\PostgreSQL\8.3\include\server\port\win32_msvc;C:\Program Files %28x86%29\PostgreSQL\8.3\include\server\port\win32;C:\Program Files %28x86%29\PostgreSQL\8.3\include\server;C:\Program Files %28x86%29\PostgreSQL\8.3\include;$(IncludePath) C:\Program Files %28x86%29\PostgreSQL\8.3\include\server\port\win32_msvc;C:\Program Files %28x86%29\PostgreSQL\8.3\include\server\port\win32;C:\Program Files %28x86%29\PostgreSQL\8.3\include\server;C:\Program Files %28x86%29\PostgreSQL\8.3\include;$(IncludePath) C:\Program Files %28x86%29\PostgreSQL\9.0\lib;$(LibraryPath) C:\Program Files\PostgreSQL\9.0\lib;$(LibraryPath) C:\Program Files %28x86%29\PostgreSQL\8.4\lib;$(LibraryPath) C:\Program Files %28x86%29\PostgreSQL\8.4\lib;$(LibraryPath) C:\Program Files %28x86%29\PostgreSQL\8.3\lib;$(LibraryPath) C:\Program Files %28x86%29\PostgreSQL\8.3\lib;$(LibraryPath) pg_repack pg_repack pg_repack pg_repack pg_repack pg_repack ../include;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) MultiThreadedDLL Level3 false CompileAsC 4005;4996;4018;%(DisableSpecificWarnings) postgres.lib $(OutDir)/$(TargetFileName) false Console true true MachineX86 
../include;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) MultiThreadedDLL Level3 false CompileAsC 4005;4996;4018;%(DisableSpecificWarnings) postgres.lib $(OutDir)/$(TargetFileName) false Console true true ../include;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) MultiThreadedDLL Level3 false CompileAsC 4005;4996;4018;%(DisableSpecificWarnings) postgres.lib $(OutDir)/$(TargetFileName) false Console true true MachineX86 ../include;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) MultiThreadedDLL Level3 false CompileAsC 4005;4996;4018;%(DisableSpecificWarnings) postgres.lib $(OutDir)/$(TargetFileName) false Console true true ../include;%(AdditionalIncludeDirectories) _USE_32BIT_TIME_T;WIN32;NDEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) MultiThreadedDLL Level3 false CompileAsC 4005;4996;4018;%(DisableSpecificWarnings) postgres.lib $(OutDir)/$(TargetFileName) false Console true true MachineX86 ../include;%(AdditionalIncludeDirectories) _USE_32BIT_TIME_T;WIN32;NDEBUG;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) MultiThreadedDLL Level3 false CompileAsC 4005;4996;4018;%(DisableSpecificWarnings) postgres.lib $(OutDir)/$(TargetFileName) false Console true true pg_repack-ver_1.5.0/msvc/lib.2010.vcxproj.filters000066400000000000000000000023741452746007700215310ustar00rootroot00000000000000 {4FC737F1-C7A5-4376-A066-2A32D752A2FF} cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx {93995380-89BD-4b04-88EB-625FBE52EBFB} h;hpp;hxx;hm;inl;inc;xsd src src src include include pg_repack-ver_1.5.0/msvc/lib.vcproj000066400000000000000000000060641452746007700173110ustar00rootroot00000000000000 pg_repack-ver_1.5.0/msvc/pg_repack.2010.sln000066400000000000000000000046711452746007700203520ustar00rootroot00000000000000 Microsoft Visual Studio Solution File, Format Version 11.00 # Visual C++ Express 2010 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bin.2010", "bin.2010.vcxproj", "{B6B37F22-9E44-4240-AAA0-650D4AC2C1E2}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "lib.2010", "lib.2010.vcxproj", "{B6B37F22-9E44-4240-AAA0-650D4AC2C2E2}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution 8.3|Win32 = 8.3|Win32 8.3|x64 = 8.3|x64 8.4|Win32 = 8.4|Win32 8.4|x64 = 8.4|x64 9.0|Win32 = 9.0|Win32 9.0|x64 = 9.0|x64 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {B6B37F22-9E44-4240-AAA0-650D4AC2C1E2}.8.3|Win32.ActiveCfg = 8.3|Win32 {B6B37F22-9E44-4240-AAA0-650D4AC2C1E2}.8.3|Win32.Build.0 = 8.3|Win32 {B6B37F22-9E44-4240-AAA0-650D4AC2C1E2}.8.3|x64.ActiveCfg = 8.3|x64 {B6B37F22-9E44-4240-AAA0-650D4AC2C1E2}.8.3|x64.Build.0 = 8.3|x64 {B6B37F22-9E44-4240-AAA0-650D4AC2C1E2}.8.4|Win32.ActiveCfg = 8.4|Win32 {B6B37F22-9E44-4240-AAA0-650D4AC2C1E2}.8.4|Win32.Build.0 = 8.4|Win32 {B6B37F22-9E44-4240-AAA0-650D4AC2C1E2}.8.4|x64.ActiveCfg = 8.4|x64 {B6B37F22-9E44-4240-AAA0-650D4AC2C1E2}.8.4|x64.Build.0 = 8.4|x64 {B6B37F22-9E44-4240-AAA0-650D4AC2C1E2}.9.0|Win32.ActiveCfg = 9.0|Win32 {B6B37F22-9E44-4240-AAA0-650D4AC2C1E2}.9.0|Win32.Build.0 = 9.0|Win32 {B6B37F22-9E44-4240-AAA0-650D4AC2C1E2}.9.0|x64.ActiveCfg = 9.0|x64 {B6B37F22-9E44-4240-AAA0-650D4AC2C1E2}.9.0|x64.Build.0 = 9.0|x64 {B6B37F22-9E44-4240-AAA0-650D4AC2C2E2}.8.3|Win32.ActiveCfg = 8.3|Win32 {B6B37F22-9E44-4240-AAA0-650D4AC2C2E2}.8.3|Win32.Build.0 = 8.3|Win32 {B6B37F22-9E44-4240-AAA0-650D4AC2C2E2}.8.3|x64.ActiveCfg = 8.3|x64 
{B6B37F22-9E44-4240-AAA0-650D4AC2C2E2}.8.3|x64.Build.0 = 8.3|x64 {B6B37F22-9E44-4240-AAA0-650D4AC2C2E2}.8.4|Win32.ActiveCfg = 8.4|Win32 {B6B37F22-9E44-4240-AAA0-650D4AC2C2E2}.8.4|Win32.Build.0 = 8.4|Win32 {B6B37F22-9E44-4240-AAA0-650D4AC2C2E2}.8.4|x64.ActiveCfg = 8.4|x64 {B6B37F22-9E44-4240-AAA0-650D4AC2C2E2}.8.4|x64.Build.0 = 8.4|x64 {B6B37F22-9E44-4240-AAA0-650D4AC2C2E2}.9.0|Win32.ActiveCfg = 9.0|Win32 {B6B37F22-9E44-4240-AAA0-650D4AC2C2E2}.9.0|Win32.Build.0 = 9.0|Win32 {B6B37F22-9E44-4240-AAA0-650D4AC2C2E2}.9.0|x64.ActiveCfg = 9.0|x64 {B6B37F22-9E44-4240-AAA0-650D4AC2C2E2}.9.0|x64.Build.0 = 9.0|x64 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection EndGlobal pg_repack-ver_1.5.0/msvc/pg_repack.sln000066400000000000000000000017251452746007700177660ustar00rootroot00000000000000 Microsoft Visual Studio Solution File, Format Version 9.00 # Visual C++ Express 2005 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bin", "bin.vcproj", "{B6B37F22-9E44-4240-AAA0-650D4AC2C1E2}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "lib", "lib.vcproj", "{B6B37F22-9E44-4240-AAA0-650D4AC2C2E2}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Release|Win32 = Release|Win32 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {B6B37F22-9E44-4240-AAA0-650D4AC2C1E2}.Release|Win32.ActiveCfg = Release|Win32 {B6B37F22-9E44-4240-AAA0-650D4AC2C1E2}.Release|Win32.Build.0 = Release|Win32 {B6B37F22-9E44-4240-AAA0-650D4AC2C2E2}.Release|Win32.ActiveCfg = Release|Win32 {B6B37F22-9E44-4240-AAA0-650D4AC2C2E2}.Release|Win32.Build.0 = Release|Win32 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection EndGlobal pg_repack-ver_1.5.0/msvc/readme.txt000066400000000000000000000024511452746007700173100ustar00rootroot00000000000000How to build with Microsoft Visual C++ Express 2005 You might need to: 1. Register the PostgreSQL directories in your environment. 2. Resolve the redefinition of the ERROR macro. ---- 1. Register the PostgreSQL directories in your environment. The directory configuration options are found in: Tools > Options > Projects and Solutions > VC++ Directories You might need to add the following directories: into "include files" - C:\Program Files\PostgreSQL\8.4\include - C:\Program Files\PostgreSQL\8.4\include\internal - C:\Program Files\PostgreSQL\8.4\include\server - C:\Program Files\PostgreSQL\8.4\include\server\port\win32 - C:\Program Files\PostgreSQL\8.4\include\server\port\win32_msvc into "library files" - C:\Program Files\PostgreSQL\8.4\lib ---- 2. Resolve the redefinition of the ERROR macro. It may not be the cleanest approach, but I recommend modifying your wingdi.h. 
--- wingdi.h 2008-01-18 22:17:42.000000000 +0900 +++ wingdi.fixed.h 2010-03-03 09:51:43.015625000 +0900 @@ -101,11 +101,10 @@ #endif // (_WIN32_WINNT >= _WIN32_WINNT_WINXP) /* Region Flags */ -#define ERROR 0 +#define RGN_ERROR 0 #define NULLREGION 1 #define SIMPLEREGION 2 #define COMPLEXREGION 3 -#define RGN_ERROR ERROR /* CombineRgn() Styles */ #define RGN_AND 1 pg_repack-ver_1.5.0/regress/000077500000000000000000000000001452746007700160125ustar00rootroot00000000000000pg_repack-ver_1.5.0/regress/Makefile000066400000000000000000000013771452746007700174620ustar00rootroot00000000000000# # pg_repack: regress/Makefile # # Portions Copyright (c) 2008-2012, NIPPON TELEGRAPH AND TELEPHONE CORPORATION # Portions Copyright (c) 2011, Itagaki Takahiro # Portions Copyright (c) 2012-2020, The Reorg Development Team # PG_CONFIG ?= pg_config # version as a number, e.g. 9.1.4 -> 901 VERSION := $(shell $(PG_CONFIG) --version | sed 's/.* \([[:digit:].]\{1,\}\).*/\1/') INTVERSION := $(shell echo $$(($$(echo $(VERSION).0 | sed 's/\([[:digit:]]\{1,\}\)\.\([[:digit:]]\{1,\}\).*/\1*100+\2/')))) # # Test suite # REGRESS := init-extension repack-setup repack-run error-on-invalid-idx after-schema repack-check nosuper tablespace get_order_by trigger USE_PGXS = 1 # use pgxs if not in contrib directory PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) pg_repack-ver_1.5.0/regress/expected/000077500000000000000000000000001452746007700176135ustar00rootroot00000000000000pg_repack-ver_1.5.0/regress/expected/after-schema.out000066400000000000000000000043561452746007700227130ustar00rootroot00000000000000-- -- tables schema after running repack -- \d tbl_cluster Table "public.tbl_cluster" Column | Type | Modifiers --------+-----------------------------+----------- col1 | integer | not null time | timestamp without time zone | ,") | text | not null Indexes: "tbl_cluster_pkey" PRIMARY KEY, btree (","")", col1) WITH (fillfactor='75') ",") cluster" btree ("time", length(","")"), ","")" text_pattern_ops) WITH (fillfactor='75') CLUSTER \d tbl_gistkey Table "public.tbl_gistkey" Column | Type | Modifiers --------+---------+----------- id | integer | not null c | circle | Indexes: "tbl_gistkey_pkey" PRIMARY KEY, btree (id) "cidx_circle" gist (c) CLUSTER \d tbl_only_ckey Table "public.tbl_only_ckey" Column | Type | Modifiers --------+-----------------------------+----------- col1 | integer | col2 | timestamp without time zone | ,") | text | Indexes: "cidx_only_ckey" btree (col2, ","")") CLUSTER \d tbl_only_pkey Table "public.tbl_only_pkey" Column | Type | Modifiers --------+---------+----------- col1 | integer | not null ,") | text | Indexes: "tbl_only_pkey_pkey" PRIMARY KEY, btree (col1) \d tbl_with_dropped_column Table "public.tbl_with_dropped_column" Column | Type | Modifiers --------+---------+----------- c1 | text | id | integer | not null c2 | text | c3 | text | Indexes: "tbl_with_dropped_column_pkey" PRIMARY KEY, btree (id) WITH (fillfactor='75') CLUSTER "idx_c1c2" btree (c1, c2) WITH (fillfactor='75') "idx_c2c1" btree (c2, c1) \d tbl_with_dropped_toast Table "public.tbl_with_dropped_toast" Column | Type | Modifiers --------+---------+----------- i | integer | not null j | integer | not null Indexes: "tbl_with_dropped_toast_pkey" PRIMARY KEY, btree (i, j) CLUSTER \d tbl_idxopts Table "public.tbl_idxopts" Column | Type | Modifiers --------+---------+----------- i | integer | not null t | text | Indexes: "tbl_idxopts_pkey" PRIMARY KEY, btree (i) "idxopts_t" btree (t DESC NULLS LAST) WHERE t <> 'aaa'::text 
pg_repack-ver_1.5.0/regress/expected/after-schema_1.out000066400000000000000000000056611452746007700231330ustar00rootroot00000000000000-- -- tables schema after running repack -- \d tbl_cluster Table "public.tbl_cluster" Column | Type | Collation | Nullable | Default --------+-----------------------------+-----------+----------+--------- col1 | integer | | not null | time | timestamp without time zone | | | ,") | text | | not null | Indexes: "tbl_cluster_pkey" PRIMARY KEY, btree (","")", col1) WITH (fillfactor='75') ",") cluster" btree ("time", length(","")"), ","")" text_pattern_ops) WITH (fillfactor='75') CLUSTER \d tbl_gistkey Table "public.tbl_gistkey" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- id | integer | | not null | c | circle | | | Indexes: "tbl_gistkey_pkey" PRIMARY KEY, btree (id) "cidx_circle" gist (c) CLUSTER \d tbl_only_ckey Table "public.tbl_only_ckey" Column | Type | Collation | Nullable | Default --------+-----------------------------+-----------+----------+--------- col1 | integer | | | col2 | timestamp without time zone | | | ,") | text | | | Indexes: "cidx_only_ckey" btree (col2, ","")") CLUSTER \d tbl_only_pkey Table "public.tbl_only_pkey" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- col1 | integer | | not null | ,") | text | | | Indexes: "tbl_only_pkey_pkey" PRIMARY KEY, btree (col1) \d tbl_with_dropped_column Table "public.tbl_with_dropped_column" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- c1 | text | | | id | integer | | not null | c2 | text | | | c3 | text | | | Indexes: "tbl_with_dropped_column_pkey" PRIMARY KEY, btree (id) WITH (fillfactor='75') CLUSTER "idx_c1c2" btree (c1, c2) WITH (fillfactor='75') "idx_c2c1" btree (c2, c1) \d tbl_with_dropped_toast Table "public.tbl_with_dropped_toast" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- i | integer | | not null | j | integer | | not null | Indexes: "tbl_with_dropped_toast_pkey" PRIMARY KEY, btree (i, j) CLUSTER \d tbl_idxopts Table "public.tbl_idxopts" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- i | integer | | not null | t | text | | | Indexes: "tbl_idxopts_pkey" PRIMARY KEY, btree (i) "idxopts_t" btree (t DESC NULLS LAST) WHERE t <> 'aaa'::text pg_repack-ver_1.5.0/regress/expected/error-on-invalid-idx.out000066400000000000000000000020471452746007700243200ustar00rootroot00000000000000-- -- do repack -- \! pg_repack --dbname=contrib_regression --table=tbl_cluster --error-on-invalid-index INFO: repacking table "public.tbl_cluster" \! pg_repack --dbname=contrib_regression --table=tbl_badindex --error-on-invalid-index INFO: repacking table "public.tbl_badindex" WARNING: Invalid index: CREATE UNIQUE INDEX idx_badindex_n ON public.tbl_badindex USING btree (n) \! 
pg_repack --dbname=contrib_regression --error-on-invalid-index INFO: repacking table "public.tbl_badindex" WARNING: Invalid index: CREATE UNIQUE INDEX idx_badindex_n ON public.tbl_badindex USING btree (n) INFO: repacking table "public.tbl_cluster" INFO: repacking table "public.tbl_gistkey" INFO: repacking table "public.tbl_idxopts" INFO: repacking table "public.tbl_only_pkey" INFO: repacking table "public.tbl_order" INFO: repacking table "public.tbl_storage_plain" INFO: repacking table "public.tbl_with_dropped_column" INFO: repacking table "public.tbl_with_dropped_toast" INFO: repacking table "public.tbl_with_mod_column_storage" INFO: repacking table "public.tbl_with_toast" pg_repack-ver_1.5.0/regress/expected/error-on-invalid-idx_1.out000066400000000000000000000011111452746007700245270ustar00rootroot00000000000000-- -- do repack -- \! pg_repack --dbname=contrib_regression --table=tbl_cluster --error-on-invalid-index INFO: repacking table "public.tbl_cluster" \! pg_repack --dbname=contrib_regression --table=tbl_badindex --error-on-invalid-index INFO: repacking table "public.tbl_badindex" WARNING: Invalid index: CREATE UNIQUE INDEX idx_badindex_n ON public.tbl_badindex USING btree (n) \! pg_repack --dbname=contrib_regression --error-on-invalid-index INFO: repacking table "public.tbl_badindex" WARNING: Invalid index: CREATE UNIQUE INDEX idx_badindex_n ON public.tbl_badindex USING btree (n) pg_repack-ver_1.5.0/regress/expected/get_order_by.out000066400000000000000000000047241452746007700230170ustar00rootroot00000000000000-- -- pg_repack issue #3 -- CREATE TABLE issue3_1 (col1 int NOT NULL, col2 text NOT NULL); CREATE UNIQUE INDEX issue3_1_idx ON issue3_1 (col1, col2 DESC); SELECT repack.get_order_by('issue3_1_idx'::regclass::oid, 'issue3_1'::regclass::oid); get_order_by ----------------- col1, col2 DESC (1 row) \! pg_repack --dbname=contrib_regression --table=issue3_1 INFO: repacking table "public.issue3_1" CREATE TABLE issue3_2 (col1 int NOT NULL, col2 text NOT NULL); CREATE UNIQUE INDEX issue3_2_idx ON issue3_2 (col1 DESC, col2 text_pattern_ops); SELECT repack.get_order_by('issue3_2_idx'::regclass::oid, 'issue3_2'::regclass::oid); get_order_by --------------------------- col1 DESC, col2 USING ~<~ (1 row) \! pg_repack --dbname=contrib_regression --table=issue3_2 INFO: repacking table "public.issue3_2" CREATE TABLE issue3_3 (col1 int NOT NULL, col2 text NOT NULL); CREATE UNIQUE INDEX issue3_3_idx ON issue3_3 (col1 DESC, col2 DESC); SELECT repack.get_order_by('issue3_3_idx'::regclass::oid, 'issue3_3'::regclass::oid); get_order_by ---------------------- col1 DESC, col2 DESC (1 row) \! pg_repack --dbname=contrib_regression --table=issue3_3 INFO: repacking table "public.issue3_3" CREATE TABLE issue3_4 (col1 int NOT NULL, col2 text NOT NULL); CREATE UNIQUE INDEX issue3_4_idx ON issue3_4 (col1 NULLS FIRST, col2 text_pattern_ops DESC NULLS LAST); SELECT repack.get_order_by('issue3_4_idx'::regclass::oid, 'issue3_4'::regclass::oid); get_order_by -------------------------------------------------- col1 NULLS FIRST, col2 DESC USING ~<~ NULLS LAST (1 row) \! pg_repack --dbname=contrib_regression --table=issue3_4 INFO: repacking table "public.issue3_4" CREATE TABLE issue3_5 (col1 int NOT NULL, col2 text NOT NULL); CREATE UNIQUE INDEX issue3_5_idx ON issue3_5 (col1 DESC NULLS FIRST, col2 COLLATE "POSIX" DESC); SELECT repack.get_order_by('issue3_5_idx'::regclass::oid, 'issue3_5'::regclass::oid); get_order_by -------------------------------------- col1 DESC, col2 COLLATE "POSIX" DESC (1 row) \! 
pg_repack --dbname=contrib_regression --table=issue3_5 INFO: repacking table "public.issue3_5" -- -- pg_repack issue #321 -- CREATE TABLE issue321 (col1 int NOT NULL, col2 text NOT NULL); CREATE UNIQUE INDEX issue321_idx ON issue321 (col1); SELECT repack.get_order_by('issue321_idx'::regclass::oid, 1); ERROR: table name not found for OID 1 SELECT repack.get_order_by(1, 1); ERROR: cache lookup failed for index 1 pg_repack-ver_1.5.0/regress/expected/init-extension.out000066400000000000000000000001321452746007700233150ustar00rootroot00000000000000SET client_min_messages = warning; CREATE EXTENSION pg_repack; RESET client_min_messages; pg_repack-ver_1.5.0/regress/expected/nosuper.out000066400000000000000000000013611452746007700220400ustar00rootroot00000000000000-- -- no superuser check -- SET client_min_messages = error; DROP ROLE IF EXISTS nosuper; SET client_min_messages = warning; CREATE ROLE nosuper WITH LOGIN; -- => OK \! pg_repack --dbname=contrib_regression --table=tbl_cluster --no-superuser-check INFO: repacking table "public.tbl_cluster" -- => ERROR \! pg_repack --dbname=contrib_regression --table=tbl_cluster --username=nosuper ERROR: pg_repack failed with error: You must be a superuser to use pg_repack -- => ERROR \! pg_repack --dbname=contrib_regression --table=tbl_cluster --username=nosuper --no-superuser-check ERROR: pg_repack failed with error: ERROR: permission denied for schema repack LINE 1: select repack.version(), repack.version_sql() ^ DROP ROLE IF EXISTS nosuper; pg_repack-ver_1.5.0/regress/expected/nosuper_1.out000066400000000000000000000012521452746007700222570ustar00rootroot00000000000000-- -- no superuser check -- SET client_min_messages = error; DROP ROLE IF EXISTS nosuper; SET client_min_messages = warning; CREATE ROLE nosuper WITH LOGIN; -- => OK \! pg_repack --dbname=contrib_regression --table=tbl_cluster --no-superuser-check INFO: repacking table "public.tbl_cluster" -- => ERROR \! pg_repack --dbname=contrib_regression --table=tbl_cluster --username=nosuper ERROR: pg_repack failed with error: You must be a superuser to use pg_repack -- => ERROR \! 
pg_repack --dbname=contrib_regression --table=tbl_cluster --username=nosuper --no-superuser-check ERROR: pg_repack failed with error: ERROR: permission denied for schema repack DROP ROLE IF EXISTS nosuper; pg_repack-ver_1.5.0/regress/expected/repack-check.out000066400000000000000000000432111452746007700226650ustar00rootroot00000000000000SET client_min_messages = warning; SELECT col1, to_char("time", 'YYYY-MM-DD HH24:MI:SS'), ","")" FROM tbl_cluster ORDER BY 1, 2; col1 | to_char | ,") ------+---------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 1 | 2008-12-31 10:00:00 | admin 2 | 2008-01-01 00:00:00 | king 3 | 2008-03-04 12:00:00 | joker 4 | 2008-03-05 15:00:00 | queen 5 | 2008-01-01 00:30:00 | 
1.4142135623730950488016887242096980785696718753769480731766797379907324784621070388503875343276415727350138462309122970249248360558507372126441214970999358314132226659275055927557999505011527820605714701095599716059702745345968620147285174186408891986095523292304843087143214508397626036279952514079896872533965463318088296406206152583523950547457502877599617298355752203375318570113543746034084988471603868999706990048150305440277903164542478230684929369186215805784631115966687130130156185689872372352885092648612494977154218334204285686060146824720771435854874155657069677653720226485447015858801620758474922657226002085584466521458398893944370926591800311388246468157082630100594858704003186480342194897278290641045072636881313739855256117322040245091227700226941127573627280495738108967504018369868368450725799364729060762996941380475654823728997180326802474420629269124859052181004459842150591120249441341728531478105803603371077309182869314710171111683916581726889419758716582152128229518488471.732050807568877293527446341505872366942805253810380628055806979451933016908800037081146186757248575675626141415406703029969945094998952478811655512094373648528093231902305582067974820101084674923265015312343266903322886650672254668921837971227047131660367861588019049986537379859389467650347506576050756618348129606100947602187190325083145829523959832997789824508288714463832917347224163984587855397667958063818353666110843173780894378316102088305524901670023520711144288695990956365797087168498072899493296484283020786408603988738697537582317317831395992983007838702877053913369563312103707264019249106768231199288375641141422016742752102372994270831059898459475987664288897796147837958390228854852903576033852808064381972344661059689722872865264153822664698420021195484155278441181286534507035191650016689294415480846071277143999762926834629577438361895110127148638746976545982451788550975379013880664961911962222957110555242923723192197738262561631468842032853716682938649611917049738836395495938 (5 rows) SELECT * FROM tbl_only_ckey ORDER BY 1; col1 | col2 | ,") ------+--------------------------+----- 1 | Tue Jan 01 00:00:00 2008 | abc 2 | Fri Feb 01 00:00:00 2008 | def (2 rows) SELECT * FROM tbl_only_pkey ORDER BY 1; col1 | ,") ------+----- 1 | abc 2 | def (2 rows) SELECT * FROM tbl_gistkey ORDER BY 1; id | c ----+----------- 1 | <(1,2),3> 2 | <(4,5),6> (2 rows) SET enable_seqscan = on; SET enable_indexscan = off; SELECT * FROM tbl_with_dropped_column ; c1 | id | c2 | c3 ----+----+----+---- c1 | 1 | c2 | c1 | 2 | c2 | (2 rows) SELECT * FROM view_for_dropped_column ORDER BY 1, 2; c1 | id | c2 | c3 ----+----+----+---- c1 | 1 | c2 | c1 | 2 | c2 | (2 rows) SELECT * FROM tbl_with_dropped_toast; i | j ---+---- 1 | 10 2 | 20 (2 rows) SET enable_seqscan = off; SET enable_indexscan = on; SELECT * FROM tbl_with_dropped_column ORDER BY 1, 2; c1 | id | c2 | c3 ----+----+----+---- c1 | 1 | c2 | c1 | 2 | c2 | (2 rows) SELECT * FROM view_for_dropped_column; c1 | id | c2 | c3 ----+----+----+---- c1 | 1 | c2 | c1 | 2 | c2 | (2 rows) SELECT * FROM tbl_with_dropped_toast; i | j ---+---- 1 | 10 2 | 20 (2 rows) RESET enable_seqscan; RESET enable_indexscan; -- check if storage option for both table and TOAST table didn't go away. 
SELECT CASE relkind WHEN 'r' THEN relname WHEN 't' THEN 'toast_table' END as table, reloptions FROM pg_class WHERE relname = 'tbl_with_toast' OR relname = 'pg_toast_' || 'tbl_with_toast'::regclass::oid ORDER BY 1; table | reloptions ----------------+--------------------------------------------------------------------- tbl_with_toast | {autovacuum_vacuum_scale_factor=30,autovacuum_vacuum_threshold=300} toast_table | {autovacuum_vacuum_scale_factor=40,autovacuum_vacuum_threshold=400} (2 rows) SELECT pg_relation_size(reltoastrelid) = 0 as check_toast_rel_size FROM pg_class WHERE relname = 'tbl_with_mod_column_storage'; check_toast_rel_size ---------------------- t (1 row) -- -- check broken links or orphan toast relations -- SELECT oid, relname FROM pg_class WHERE relkind = 't' AND oid NOT IN (SELECT reltoastrelid FROM pg_class WHERE relkind = 'r'); oid | relname -----+--------- (0 rows) SELECT oid, relname FROM pg_class WHERE relkind = 'r' AND reltoastrelid <> 0 AND reltoastrelid NOT IN (SELECT oid FROM pg_class WHERE relkind = 't'); oid | relname -----+--------- (0 rows) -- check columns options SELECT attname, attstattarget, attoptions FROM pg_attribute WHERE attrelid = 'tbl_idxopts'::regclass AND attnum > 0 ORDER BY attnum; attname | attstattarget | attoptions ---------+---------------+------------------- i | 1 | t | -1 | {n_distinct=-0.5} (2 rows) -- -- NOT NULL UNIQUE -- CREATE TABLE tbl_nn (col1 int NOT NULL, col2 int NOT NULL); CREATE TABLE tbl_uk (col1 int NOT NULL, col2 int , UNIQUE(col1, col2)); CREATE TABLE tbl_nn_uk (col1 int NOT NULL, col2 int NOT NULL, UNIQUE(col1, col2)); CREATE TABLE tbl_pk_uk (col1 int NOT NULL, col2 int NOT NULL, PRIMARY KEY(col1, col2), UNIQUE(col2, col1)); CREATE TABLE tbl_nn_puk (col1 int NOT NULL, col2 int NOT NULL); CREATE UNIQUE INDEX tbl_nn_puk_pcol1_idx ON tbl_nn_puk(col1) WHERE col1 < 10; \! pg_repack --dbname=contrib_regression --table=tbl_nn WARNING: relation "public.tbl_nn" must have a primary key or not-null unique keys -- => WARNING \! pg_repack --dbname=contrib_regression --table=tbl_uk WARNING: relation "public.tbl_uk" must have a primary key or not-null unique keys -- => WARNING \! pg_repack --dbname=contrib_regression --table=tbl_nn_uk INFO: repacking table "public.tbl_nn_uk" -- => OK \! pg_repack --dbname=contrib_regression --table=tbl_pk_uk INFO: repacking table "public.tbl_pk_uk" -- => OK \! pg_repack --dbname=contrib_regression --table=tbl_pk_uk --only-indexes INFO: repacking indexes of "tbl_pk_uk" INFO: repacking index "public.tbl_pk_uk_col2_col1_key" INFO: repacking index "public.tbl_pk_uk_pkey" -- => OK \! pg_repack --dbname=contrib_regression --table=tbl_nn_puk WARNING: relation "public.tbl_nn_puk" must have a primary key or not-null unique keys -- => WARNING -- -- Triggers handling -- CREATE FUNCTION trgtest() RETURNS trigger AS $$BEGIN RETURN NEW; END$$ LANGUAGE plpgsql; CREATE TABLE trg1 (id integer PRIMARY KEY); CREATE TRIGGER repack_trigger_1 AFTER UPDATE ON trg1 FOR EACH ROW EXECUTE PROCEDURE trgtest(); \! pg_repack --dbname=contrib_regression --table=trg1 INFO: repacking table "public.trg1" CREATE TABLE trg2 (id integer PRIMARY KEY); CREATE TRIGGER repack_trigger AFTER UPDATE ON trg2 FOR EACH ROW EXECUTE PROCEDURE trgtest(); \! 
pg_repack --dbname=contrib_regression --table=trg2 INFO: repacking table "public.trg2" WARNING: the table "public.trg2" already has a trigger called "repack_trigger" DETAIL: The trigger was probably installed during a previous attempt to run pg_repack on the table which was interrupted and for some reason failed to clean up the temporary objects. Please drop the trigger or drop and recreate the pg_repack extension altogether to remove all the temporary objects left over. CREATE TABLE trg3 (id integer PRIMARY KEY); CREATE TRIGGER repack_trigger_1 BEFORE UPDATE ON trg3 FOR EACH ROW EXECUTE PROCEDURE trgtest(); \! pg_repack --dbname=contrib_regression --table=trg3 INFO: repacking table "public.trg3" -- -- Table re-organization using specific column -- -- reorganize table using cluster key. Sort in ascending order. \! pg_repack --dbname=contrib_regression --table=tbl_order INFO: repacking table "public.tbl_order" SELECT ctid, c FROM tbl_order WHERE ctid <= '(0,10)'; ctid | c --------+---- (0,1) | 1 (0,2) | 2 (0,3) | 3 (0,4) | 4 (0,5) | 5 (0,6) | 6 (0,7) | 7 (0,8) | 8 (0,9) | 9 (0,10) | 10 (10 rows) -- reorganize table using specific column order. Sort in descending order. \! pg_repack --dbname=contrib_regression --table=tbl_order -o "c DESC" INFO: repacking table "public.tbl_order" SELECT ctid, c FROM tbl_order WHERE ctid <= '(0,10)'; ctid | c --------+----- (0,1) | 100 (0,2) | 99 (0,3) | 98 (0,4) | 97 (0,5) | 96 (0,6) | 95 (0,7) | 94 (0,8) | 93 (0,9) | 92 (0,10) | 91 (10 rows) -- -- Dry run -- \! pg_repack --dbname=contrib_regression --table=tbl_cluster --dry-run INFO: Dry run enabled, not executing repack INFO: repacking table "public.tbl_cluster" -- Test --schema -- CREATE SCHEMA test_schema1; CREATE TABLE test_schema1.tbl1 (id INTEGER PRIMARY KEY); CREATE TABLE test_schema1.tbl2 (id INTEGER PRIMARY KEY); CREATE SCHEMA test_schema2; CREATE TABLE test_schema2.tbl1 (id INTEGER PRIMARY KEY); CREATE TABLE test_schema2.tbl2 (id INTEGER PRIMARY KEY); -- => OK \! pg_repack --dbname=contrib_regression --schema=test_schema1 INFO: repacking table "test_schema1.tbl1" INFO: repacking table "test_schema1.tbl2" -- => OK \! pg_repack --dbname=contrib_regression --schema=test_schema1 --schema=test_schema2 INFO: repacking table "test_schema1.tbl1" INFO: repacking table "test_schema1.tbl2" INFO: repacking table "test_schema2.tbl1" INFO: repacking table "test_schema2.tbl2" -- => ERROR \! pg_repack --dbname=contrib_regression --schema=test_schema1 --table=tbl1 ERROR: cannot repack specific table(s) in schema, use schema.table notation instead -- => ERROR \! pg_repack --dbname=contrib_regression --all --schema=test_schema1 ERROR: cannot repack specific schema(s) in all databases -- -- don't kill backend -- \! pg_repack --dbname=contrib_regression --table=tbl_cluster --no-kill-backend INFO: repacking table "public.tbl_cluster" -- -- exclude extension check -- CREATE SCHEMA exclude_extension_schema; CREATE TABLE exclude_extension_schema.tbl(val integer primary key); -- => ERROR \! pg_repack --dbname=contrib_regression --table=dummy_table --exclude-extension=dummy_extension ERROR: cannot specify --table (-t) and --exclude-extension (-C) -- => ERROR \! pg_repack --dbname=contrib_regression --table=dummy_table --exclude-extension=dummy_extension -x ERROR: cannot specify --only-indexes (-x) and --exclude-extension (-C) -- => ERROR \! pg_repack --dbname=contrib_regression --index=dummy_index --exclude-extension=dummy_extension ERROR: cannot specify --index (-i) and --exclude-extension (-C) -- => OK \! 
pg_repack --dbname=contrib_regression --schema=exclude_extension_schema --exclude-extension=dummy_extension INFO: repacking table "exclude_extension_schema.tbl" -- => OK \! pg_repack --dbname=contrib_regression --schema=exclude_extension_schema --exclude-extension=dummy_extension --exclude-extension=dummy_extension INFO: repacking table "exclude_extension_schema.tbl" -- -- table inheritance check -- CREATE TABLE parent_a(val integer primary key); CREATE TABLE child_a_1(val integer primary key) INHERITS(parent_a); CREATE TABLE child_a_2(val integer primary key) INHERITS(parent_a); CREATE TABLE parent_b(val integer primary key); CREATE TABLE child_b_1(val integer primary key) INHERITS(parent_b); CREATE TABLE child_b_2(val integer primary key) INHERITS(parent_b); -- => ERROR \! pg_repack --dbname=contrib_regression --parent-table=dummy_table ERROR: pg_repack failed with error: ERROR: relation "dummy_table" does not exist -- => ERROR \! pg_repack --dbname=contrib_regression --parent-table=dummy_index --index=dummy_index ERROR: cannot specify --index (-i) and --parent-table (-I) -- => ERROR \! pg_repack --dbname=contrib_regression --parent-table=dummy_table --schema=dummy_schema ERROR: cannot repack specific table(s) in schema, use schema.table notation instead -- => ERROR \! pg_repack --dbname=contrib_regression --parent-table=dummy_table --all ERROR: cannot repack specific table(s) in all databases -- => OK \! pg_repack --dbname=contrib_regression --table=parent_a --parent-table=parent_b INFO: repacking table "public.child_b_1" INFO: repacking table "public.child_b_2" INFO: repacking table "public.parent_a" INFO: repacking table "public.parent_b" -- => OK \! pg_repack --dbname=contrib_regression --parent-table=parent_a --parent-table=parent_b INFO: repacking table "public.child_a_1" INFO: repacking table "public.child_a_2" INFO: repacking table "public.child_b_1" INFO: repacking table "public.child_b_2" INFO: repacking table "public.parent_a" INFO: repacking table "public.parent_b" -- => OK \! pg_repack --dbname=contrib_regression --table=parent_a --parent-table=parent_b --only-indexes INFO: repacking indexes of "parent_a" INFO: repacking index "public.parent_a_pkey" INFO: repacking indexes of "public.child_b_1" INFO: repacking index "public.child_b_1_pkey" INFO: repacking indexes of "public.child_b_2" INFO: repacking index "public.child_b_2_pkey" INFO: repacking indexes of "public.parent_b" INFO: repacking index "public.parent_b_pkey" -- => OK \! pg_repack --dbname=contrib_regression --parent-table=parent_a --parent-table=parent_b --only-indexes INFO: repacking indexes of "public.child_a_1" INFO: repacking index "public.child_a_1_pkey" INFO: repacking indexes of "public.child_a_2" INFO: repacking index "public.child_a_2_pkey" INFO: repacking indexes of "public.parent_a" INFO: repacking index "public.parent_a_pkey" INFO: repacking indexes of "public.child_b_1" INFO: repacking index "public.child_b_1_pkey" INFO: repacking indexes of "public.child_b_2" INFO: repacking index "public.child_b_2_pkey" INFO: repacking indexes of "public.parent_b" INFO: repacking index "public.parent_b_pkey" -- -- Switch threshold -- \! pg_repack --dbname=contrib_regression --table=tbl_cluster --switch-threshold 200 INFO: repacking table "public.tbl_cluster" pg_repack-ver_1.5.0/regress/expected/repack-run.out000066400000000000000000000017401452746007700224150ustar00rootroot00000000000000-- -- do repack -- \! pg_repack --dbname=contrib_regression --table=tbl_cluster INFO: repacking table "public.tbl_cluster" \! 
pg_repack --dbname=contrib_regression --table=tbl_badindex INFO: repacking table "public.tbl_badindex" WARNING: skipping invalid index: CREATE UNIQUE INDEX idx_badindex_n ON tbl_badindex USING btree (n) \! pg_repack --dbname=contrib_regression INFO: repacking table "public.tbl_badindex" WARNING: skipping invalid index: CREATE UNIQUE INDEX idx_badindex_n ON tbl_badindex USING btree (n) INFO: repacking table "public.tbl_cluster" INFO: repacking table "public.tbl_gistkey" INFO: repacking table "public.tbl_idxopts" INFO: repacking table "public.tbl_only_pkey" INFO: repacking table "public.tbl_order" INFO: repacking table "public.tbl_storage_plain" INFO: repacking table "public.tbl_with_dropped_column" INFO: repacking table "public.tbl_with_dropped_toast" INFO: repacking table "public.tbl_with_mod_column_storage" INFO: repacking table "public.tbl_with_toast" pg_repack-ver_1.5.0/regress/expected/repack-run_1.out000066400000000000000000000017561452746007700226440ustar00rootroot00000000000000-- -- do repack -- \! pg_repack --dbname=contrib_regression --table=tbl_cluster INFO: repacking table "public.tbl_cluster" \! pg_repack --dbname=contrib_regression --table=tbl_badindex INFO: repacking table "public.tbl_badindex" WARNING: skipping invalid index: CREATE UNIQUE INDEX idx_badindex_n ON public.tbl_badindex USING btree (n) \! pg_repack --dbname=contrib_regression INFO: repacking table "public.tbl_badindex" WARNING: skipping invalid index: CREATE UNIQUE INDEX idx_badindex_n ON public.tbl_badindex USING btree (n) INFO: repacking table "public.tbl_cluster" INFO: repacking table "public.tbl_gistkey" INFO: repacking table "public.tbl_idxopts" INFO: repacking table "public.tbl_only_pkey" INFO: repacking table "public.tbl_order" INFO: repacking table "public.tbl_storage_plain" INFO: repacking table "public.tbl_with_dropped_column" INFO: repacking table "public.tbl_with_dropped_toast" INFO: repacking table "public.tbl_with_mod_column_storage" INFO: repacking table "public.tbl_with_toast" pg_repack-ver_1.5.0/regress/expected/repack-setup.out000066400000000000000000000116461452746007700227570ustar00rootroot00000000000000SET client_min_messages = warning; -- -- create table. 
-- CREATE TABLE tbl_cluster ( col1 int, "time" timestamp, ","")" text, PRIMARY KEY (","")", col1) WITH (fillfactor = 75) ) WITH (fillfactor = 70); CREATE INDEX ","") cluster" ON tbl_cluster ("time", length(","")"), ","")" text_pattern_ops) WITH (fillfactor = 75); ALTER TABLE tbl_cluster CLUSTER ON ","") cluster"; CREATE TABLE tbl_only_pkey ( col1 int PRIMARY KEY, ","")" text ); CREATE TABLE tbl_only_ckey ( col1 int, col2 timestamp, ","")" text ) WITH (fillfactor = 70); CREATE INDEX cidx_only_ckey ON tbl_only_ckey (col2, ","")"); ALTER TABLE tbl_only_ckey CLUSTER ON cidx_only_ckey; CREATE TABLE tbl_gistkey ( id integer PRIMARY KEY, c circle ); CREATE INDEX cidx_circle ON tbl_gistkey USING gist (c); ALTER TABLE tbl_gistkey CLUSTER ON cidx_circle; CREATE TABLE tbl_with_dropped_column ( d1 text, c1 text, id integer PRIMARY KEY, d2 text, c2 text, d3 text ); ALTER INDEX tbl_with_dropped_column_pkey SET (fillfactor = 75); ALTER TABLE tbl_with_dropped_column CLUSTER ON tbl_with_dropped_column_pkey; CREATE INDEX idx_c1c2 ON tbl_with_dropped_column (c1, c2) WITH (fillfactor = 75); CREATE INDEX idx_c2c1 ON tbl_with_dropped_column (c2, c1); CREATE TABLE tbl_with_dropped_toast ( i integer, j integer, t text, PRIMARY KEY (i, j) ); ALTER TABLE tbl_with_dropped_toast CLUSTER ON tbl_with_dropped_toast_pkey; CREATE TABLE tbl_badindex ( id integer PRIMARY KEY, n integer ); CREATE TABLE tbl_idxopts ( i integer PRIMARY KEY, t text ); CREATE INDEX idxopts_t ON tbl_idxopts (t DESC NULLS LAST) WHERE (t != 'aaa'); -- Use this table to play with attribute options too ALTER TABLE tbl_idxopts ALTER i SET STATISTICS 1; ALTER TABLE tbl_idxopts ALTER t SET (n_distinct = -0.5); CREATE TABLE tbl_with_toast ( i integer PRIMARY KEY, c text ); ALTER TABLE tbl_with_toast SET (AUTOVACUUM_VACUUM_SCALE_FACTOR = 30, AUTOVACUUM_VACUUM_THRESHOLD = 300); ALTER TABLE tbl_with_toast SET (TOAST.AUTOVACUUM_VACUUM_SCALE_FACTOR = 40, TOAST.AUTOVACUUM_VACUUM_THRESHOLD = 400); CREATE TABLE tbl_with_mod_column_storage ( id integer PRIMARY KEY, c text ); ALTER TABLE tbl_with_mod_column_storage ALTER c SET STORAGE MAIN; CREATE TABLE tbl_order (c int primary key); CREATE TABLE tbl_storage_plain (c1 int primary key, c2 text); ALTER TABLE tbl_storage_plain ALTER COLUMN c1 SET STORAGE PLAIN; ALTER TABLE tbl_storage_plain ALTER COLUMN c2 SET STORAGE PLAIN; -- -- insert data -- INSERT INTO tbl_cluster VALUES(1, '2008-12-31 10:00:00', 'admin'); INSERT INTO tbl_cluster VALUES(2, '2008-01-01 00:00:00', 'king'); INSERT INTO tbl_cluster VALUES(3, '2008-03-04 12:00:00', 'joker'); INSERT INTO tbl_cluster VALUES(4, '2008-03-05 15:00:00', 'queen'); INSERT INTO tbl_cluster VALUES(5, '2008-01-01 00:30:00', sqrt(2::numeric(1000,999))::text || sqrt(3::numeric(1000,999))::text); INSERT INTO tbl_only_pkey VALUES(1, 'abc'); INSERT INTO tbl_only_pkey VALUES(2, 'def'); INSERT INTO tbl_only_ckey VALUES(1, '2008-01-01 00:00:00', 'abc'); INSERT INTO tbl_only_ckey VALUES(2, '2008-02-01 00:00:00', 'def'); INSERT INTO tbl_gistkey VALUES(1, '<(1,2),3>'); INSERT INTO tbl_gistkey VALUES(2, '<(4,5),6>'); INSERT INTO tbl_with_dropped_column VALUES('d1', 'c1', 2, 'd2', 'c2', 'd3'); INSERT INTO tbl_with_dropped_column VALUES('d1', 'c1', 1, 'd2', 'c2', 'd3'); ALTER TABLE tbl_with_dropped_column DROP COLUMN d1; ALTER TABLE tbl_with_dropped_column DROP COLUMN d2; ALTER TABLE tbl_with_dropped_column DROP COLUMN d3; ALTER TABLE tbl_with_dropped_column ADD COLUMN c3 text; CREATE VIEW view_for_dropped_column AS SELECT * FROM tbl_with_dropped_column; INSERT INTO tbl_with_dropped_toast 
VALUES(1, 10, 'abc'); INSERT INTO tbl_with_dropped_toast VALUES(2, 20, sqrt(2::numeric(1000,999))::text || sqrt(3::numeric(1000,999))::text); ALTER TABLE tbl_with_dropped_toast DROP COLUMN t; INSERT INTO tbl_badindex VALUES(1, 10); INSERT INTO tbl_badindex VALUES(2, 10); -- insert data that is always stored into the toast table if column type is extended. SELECT setseed(0); INSERT INTO tbl_with_mod_column_storage SELECT 1, array_to_string(ARRAY(SELECT chr((random() * (127 - 32) + 32)::int) FROM generate_series(1, 3 * 1024) code), ''); setseed --------- (1 row) --- This will fail \set VERBOSITY terse CREATE UNIQUE INDEX CONCURRENTLY idx_badindex_n ON tbl_badindex (n); ERROR: could not create unique index "idx_badindex_n" INSERT INTO tbl_idxopts VALUES (0, 'abc'), (1, 'aaa'), (2, NULL), (3, 'bbb'); -- Insert no-ordered data INSERT INTO tbl_order SELECT generate_series(100, 51, -1); CLUSTER tbl_order USING tbl_order_pkey; INSERT INTO tbl_order SELECT generate_series(50, 1, -1); -- -- before -- SELECT * FROM tbl_with_dropped_column; c1 | id | c2 | c3 ----+----+----+---- c1 | 2 | c2 | c1 | 1 | c2 | (2 rows) SELECT * FROM view_for_dropped_column; c1 | id | c2 | c3 ----+----+----+---- c1 | 2 | c2 | c1 | 1 | c2 | (2 rows) SELECT * FROM tbl_with_dropped_toast; i | j ---+---- 1 | 10 2 | 20 (2 rows) VACUUM FULL tbl_storage_plain; pg_repack-ver_1.5.0/regress/expected/tablespace.out000066400000000000000000000234631452746007700224570ustar00rootroot00000000000000SET client_min_messages = warning; -- -- Tablespace features tests -- -- Note: in order to pass this test you must create a tablespace called 'testts' -- SELECT spcname FROM pg_tablespace WHERE spcname = 'testts'; spcname --------- testts (1 row) -- If the query above failed you must create the 'testts' tablespace; CREATE TABLE testts1 (id serial primary key, data text); CREATE INDEX testts1_partial_idx on testts1 (id) where (id > 0); CREATE INDEX testts1_with_idx on testts1 (id) with (fillfactor=80); INSERT INTO testts1 (data) values ('a'); INSERT INTO testts1 (data) values ('b'); INSERT INTO testts1 (data) values ('c'); -- check the indexes definitions SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, NULL, false), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace ---------------------------------------------------------------------------------------------------------- CREATE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE pg_default WHERE (id > 0) CREATE UNIQUE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE pg_default CREATE INDEX index_OID ON repack.table_OID USING btree (id) WITH (fillfactor='80') TABLESPACE pg_default (3 rows) SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo', false), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace --------------------------------------------------------------------------------------------------- CREATE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE foo WHERE (id > 0) CREATE UNIQUE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE foo CREATE INDEX index_OID ON repack.table_OID USING btree (id) WITH (fillfactor='80') TABLESPACE foo (3 rows) SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, NULL, true), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON 
c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace -------------------------------------------------------------------------------------------------------------- CREATE INDEX CONCURRENTLY index_OID ON testts1 USING btree (id) TABLESPACE pg_default WHERE (id > 0) CREATE UNIQUE INDEX CONCURRENTLY index_OID ON testts1 USING btree (id) TABLESPACE pg_default CREATE INDEX CONCURRENTLY index_OID ON testts1 USING btree (id) WITH (fillfactor='80') TABLESPACE pg_default (3 rows) SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo', true), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace ------------------------------------------------------------------------------------------------------- CREATE INDEX CONCURRENTLY index_OID ON testts1 USING btree (id) TABLESPACE foo WHERE (id > 0) CREATE UNIQUE INDEX CONCURRENTLY index_OID ON testts1 USING btree (id) TABLESPACE foo CREATE INDEX CONCURRENTLY index_OID ON testts1 USING btree (id) WITH (fillfactor='80') TABLESPACE foo (3 rows) -- Test that a tablespace is quoted as an identifier SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo bar', false), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace --------------------------------------------------------------------------------------------------------- CREATE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE "foo bar" WHERE (id > 0) CREATE UNIQUE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE "foo bar" CREATE INDEX index_OID ON repack.table_OID USING btree (id) WITH (fillfactor='80') TABLESPACE "foo bar" (3 rows) -- can move the tablespace from default \! pg_repack --dbname=contrib_regression --no-order --table=testts1 --tablespace testts INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- testts1 | testts (1 row) SELECT * from testts1 order by id; id | data ----+------ 1 | a 2 | b 3 | c (3 rows) -- tablespace stays where it is \! pg_repack --dbname=contrib_regression --no-order --table=testts1 INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- testts1 | testts (1 row) -- can move the ts back to default \! pg_repack --dbname=contrib_regression --no-order --table=testts1 -s pg_default INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- (0 rows) -- can move the table together with the indexes \! pg_repack --dbname=contrib_regression --no-order --table=testts1 --tablespace testts --moveidx INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------------------+--------- testts1 | testts testts1_partial_idx | testts testts1_pkey | testts testts1_with_idx | testts (4 rows) -- can't specify --moveidx without --tablespace \! 
pg_repack --dbname=contrib_regression --no-order --table=testts1 --moveidx ERROR: cannot specify --moveidx (-S) without --tablespace (-s) \! pg_repack --dbname=contrib_regression --no-order --table=testts1 -S ERROR: cannot specify --moveidx (-S) without --tablespace (-s) -- not broken with order \! pg_repack --dbname=contrib_regression -o id --table=testts1 --tablespace pg_default --moveidx INFO: repacking table "public.testts1" --move all indexes of the table to a tablespace \! pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --tablespace=testts INFO: repacking indexes of "testts1" INFO: repacking index "public.testts1_partial_idx" INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------------------+--------- testts1_partial_idx | testts testts1_pkey | testts testts1_with_idx | testts (3 rows) --all indexes of tablespace remain in same tablespace \! pg_repack --dbname=contrib_regression --table=testts1 --only-indexes INFO: repacking indexes of "testts1" INFO: repacking index "public.testts1_partial_idx" INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------------------+--------- testts1_partial_idx | testts testts1_pkey | testts testts1_with_idx | testts (3 rows) --move all indexes of the table to pg_default \! pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --tablespace=pg_default INFO: repacking indexes of "testts1" INFO: repacking index "public.testts1_partial_idx" INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- (0 rows) --move one index to a tablespace \! pg_repack --dbname=contrib_regression --index=testts1_pkey --tablespace=testts INFO: repacking index "public.testts1_pkey" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname --------------+--------- testts1_pkey | testts (1 row) --index tablespace stays as is \! pg_repack --dbname=contrib_regression --index=testts1_pkey INFO: repacking index "public.testts1_pkey" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname --------------+--------- testts1_pkey | testts (1 row) --move index to pg_default \! pg_repack --dbname=contrib_regression --index=testts1_pkey --tablespace=pg_default INFO: repacking index "public.testts1_pkey" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- (0 rows) --using multiple --index option \! 
pg_repack --dbname=contrib_regression --index=testts1_pkey --index=testts1_with_idx --tablespace=testts INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ------------------+--------- testts1_pkey | testts testts1_with_idx | testts (2 rows) --using --indexes-only and --index option together \! pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --index=testts1_pkey ERROR: cannot specify --index (-i) and --table (-t) pg_repack-ver_1.5.0/regress/expected/tablespace_1.out000066400000000000000000000231271452746007700226740ustar00rootroot00000000000000SET client_min_messages = warning; -- -- Tablespace features tests -- -- Note: in order to pass this test you must create a tablespace called 'testts' -- SELECT spcname FROM pg_tablespace WHERE spcname = 'testts'; spcname --------- testts (1 row) -- If the query above failed you must create the 'testts' tablespace; CREATE TABLE testts1 (id serial primary key, data text); CREATE INDEX testts1_partial_idx on testts1 (id) where (id > 0); CREATE INDEX testts1_with_idx on testts1 (id) with (fillfactor=80); INSERT INTO testts1 (data) values ('a'); INSERT INTO testts1 (data) values ('b'); INSERT INTO testts1 (data) values ('c'); -- check the indexes definitions SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, NULL, false), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace ------------------------------------------------------------------------------------ CREATE INDEX index_OID ON repack.table_OID USING btree (id) WHERE (id > 0) CREATE UNIQUE INDEX index_OID ON repack.table_OID USING btree (id) CREATE INDEX index_OID ON repack.table_OID USING btree (id) WITH (fillfactor='80') (3 rows) SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo', false), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace --------------------------------------------------------------------------------------------------- CREATE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE foo WHERE (id > 0) CREATE UNIQUE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE foo CREATE INDEX index_OID ON repack.table_OID USING btree (id) WITH (fillfactor='80') TABLESPACE foo (3 rows) SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, NULL, true), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace ---------------------------------------------------------------------------------------- CREATE INDEX CONCURRENTLY index_OID ON testts1 USING btree (id) WHERE (id > 0) CREATE UNIQUE INDEX CONCURRENTLY index_OID ON testts1 USING btree (id) CREATE INDEX CONCURRENTLY index_OID ON testts1 USING btree (id) WITH (fillfactor='80') (3 rows) SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo', true), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace ------------------------------------------------------------------------------------------------------- CREATE INDEX CONCURRENTLY 
index_OID ON testts1 USING btree (id) TABLESPACE foo WHERE (id > 0) CREATE UNIQUE INDEX CONCURRENTLY index_OID ON testts1 USING btree (id) TABLESPACE foo CREATE INDEX CONCURRENTLY index_OID ON testts1 USING btree (id) WITH (fillfactor='80') TABLESPACE foo (3 rows) -- Test that a tablespace is quoted as an identifier SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo bar', false), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace --------------------------------------------------------------------------------------------------------- CREATE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE "foo bar" WHERE (id > 0) CREATE UNIQUE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE "foo bar" CREATE INDEX index_OID ON repack.table_OID USING btree (id) WITH (fillfactor='80') TABLESPACE "foo bar" (3 rows) -- can move the tablespace from default \! pg_repack --dbname=contrib_regression --no-order --table=testts1 --tablespace testts INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- testts1 | testts (1 row) SELECT * from testts1 order by id; id | data ----+------ 1 | a 2 | b 3 | c (3 rows) -- tablespace stays where it is \! pg_repack --dbname=contrib_regression --no-order --table=testts1 INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- testts1 | testts (1 row) -- can move the ts back to default \! pg_repack --dbname=contrib_regression --no-order --table=testts1 -s pg_default INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- (0 rows) -- can move the table together with the indexes \! pg_repack --dbname=contrib_regression --no-order --table=testts1 --tablespace testts --moveidx INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------------------+--------- testts1 | testts testts1_partial_idx | testts testts1_pkey | testts testts1_with_idx | testts (4 rows) -- can't specify --moveidx without --tablespace \! pg_repack --dbname=contrib_regression --no-order --table=testts1 --moveidx ERROR: cannot specify --moveidx (-S) without --tablespace (-s) \! pg_repack --dbname=contrib_regression --no-order --table=testts1 -S ERROR: cannot specify --moveidx (-S) without --tablespace (-s) -- not broken with order \! pg_repack --dbname=contrib_regression -o id --table=testts1 --tablespace pg_default --moveidx INFO: repacking table "public.testts1" --move all indexes of the table to a tablespace \! 
pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --tablespace=testts INFO: repacking indexes of "testts1" INFO: repacking index "public.testts1_partial_idx" INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------------------+--------- testts1_partial_idx | testts testts1_pkey | testts testts1_with_idx | testts (3 rows) --all indexes of tablespace remain in same tablespace \! pg_repack --dbname=contrib_regression --table=testts1 --only-indexes INFO: repacking indexes of "testts1" INFO: repacking index "public.testts1_partial_idx" INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------------------+--------- testts1_partial_idx | testts testts1_pkey | testts testts1_with_idx | testts (3 rows) --move all indexes of the table to pg_default \! pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --tablespace=pg_default INFO: repacking indexes of "testts1" INFO: repacking index "public.testts1_partial_idx" INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- (0 rows) --move one index to a tablespace \! pg_repack --dbname=contrib_regression --index=testts1_pkey --tablespace=testts INFO: repacking index "public.testts1_pkey" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname --------------+--------- testts1_pkey | testts (1 row) --index tablespace stays as is \! pg_repack --dbname=contrib_regression --index=testts1_pkey INFO: repacking index "public.testts1_pkey" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname --------------+--------- testts1_pkey | testts (1 row) --move index to pg_default \! pg_repack --dbname=contrib_regression --index=testts1_pkey --tablespace=pg_default INFO: repacking index "public.testts1_pkey" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- (0 rows) --using multiple --index option \! pg_repack --dbname=contrib_regression --index=testts1_pkey --index=testts1_with_idx --tablespace=testts INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ------------------+--------- testts1_pkey | testts testts1_with_idx | testts (2 rows) --using --indexes-only and --index option together \! 
pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --index=testts1_pkey ERROR: cannot specify --index (-i) and --table (-t) pg_repack-ver_1.5.0/regress/expected/tablespace_2.out000066400000000000000000000235711452746007700227000ustar00rootroot00000000000000SET client_min_messages = warning; -- -- Tablespace features tests -- -- Note: in order to pass this test you must create a tablespace called 'testts' -- SELECT spcname FROM pg_tablespace WHERE spcname = 'testts'; spcname --------- testts (1 row) -- If the query above failed you must create the 'testts' tablespace; CREATE TABLE testts1 (id serial primary key, data text); CREATE INDEX testts1_partial_idx on testts1 (id) where (id > 0); CREATE INDEX testts1_with_idx on testts1 (id) with (fillfactor=80); INSERT INTO testts1 (data) values ('a'); INSERT INTO testts1 (data) values ('b'); INSERT INTO testts1 (data) values ('c'); -- check the indexes definitions SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, NULL, false), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace ---------------------------------------------------------------------------------------------------------- CREATE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE pg_default WHERE (id > 0) CREATE UNIQUE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE pg_default CREATE INDEX index_OID ON repack.table_OID USING btree (id) WITH (fillfactor='80') TABLESPACE pg_default (3 rows) SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo', false), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace --------------------------------------------------------------------------------------------------- CREATE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE foo WHERE (id > 0) CREATE UNIQUE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE foo CREATE INDEX index_OID ON repack.table_OID USING btree (id) WITH (fillfactor='80') TABLESPACE foo (3 rows) SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, NULL, true), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace --------------------------------------------------------------------------------------------------------------------- CREATE INDEX CONCURRENTLY index_OID ON public.testts1 USING btree (id) TABLESPACE pg_default WHERE (id > 0) CREATE UNIQUE INDEX CONCURRENTLY index_OID ON public.testts1 USING btree (id) TABLESPACE pg_default CREATE INDEX CONCURRENTLY index_OID ON public.testts1 USING btree (id) WITH (fillfactor='80') TABLESPACE pg_default (3 rows) SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo', true), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace -------------------------------------------------------------------------------------------------------------- CREATE INDEX CONCURRENTLY index_OID ON public.testts1 USING btree (id) TABLESPACE foo WHERE (id > 0) CREATE UNIQUE INDEX CONCURRENTLY index_OID ON public.testts1 USING btree (id) TABLESPACE foo CREATE INDEX CONCURRENTLY index_OID ON public.testts1 USING btree (id) WITH (fillfactor='80') TABLESPACE foo 
(3 rows) -- Test that a tablespace is quoted as an identifier SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo bar', false), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace --------------------------------------------------------------------------------------------------------- CREATE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE "foo bar" WHERE (id > 0) CREATE UNIQUE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE "foo bar" CREATE INDEX index_OID ON repack.table_OID USING btree (id) WITH (fillfactor='80') TABLESPACE "foo bar" (3 rows) -- can move the tablespace from default \! pg_repack --dbname=contrib_regression --no-order --table=testts1 --tablespace testts INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- testts1 | testts (1 row) SELECT * from testts1 order by id; id | data ----+------ 1 | a 2 | b 3 | c (3 rows) -- tablespace stays where it is \! pg_repack --dbname=contrib_regression --no-order --table=testts1 INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- testts1 | testts (1 row) -- can move the ts back to default \! pg_repack --dbname=contrib_regression --no-order --table=testts1 -s pg_default INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- (0 rows) -- can move the table together with the indexes \! pg_repack --dbname=contrib_regression --no-order --table=testts1 --tablespace testts --moveidx INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------------------+--------- testts1 | testts testts1_partial_idx | testts testts1_pkey | testts testts1_with_idx | testts (4 rows) -- can't specify --moveidx without --tablespace \! pg_repack --dbname=contrib_regression --no-order --table=testts1 --moveidx ERROR: cannot specify --moveidx (-S) without --tablespace (-s) \! pg_repack --dbname=contrib_regression --no-order --table=testts1 -S ERROR: cannot specify --moveidx (-S) without --tablespace (-s) -- not broken with order \! pg_repack --dbname=contrib_regression -o id --table=testts1 --tablespace pg_default --moveidx INFO: repacking table "public.testts1" --move all indexes of the table to a tablespace \! pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --tablespace=testts INFO: repacking indexes of "testts1" INFO: repacking index "public.testts1_partial_idx" INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------------------+--------- testts1_partial_idx | testts testts1_pkey | testts testts1_with_idx | testts (3 rows) --all indexes of tablespace remain in same tablespace \! 
pg_repack --dbname=contrib_regression --table=testts1 --only-indexes INFO: repacking indexes of "testts1" INFO: repacking index "public.testts1_partial_idx" INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------------------+--------- testts1_partial_idx | testts testts1_pkey | testts testts1_with_idx | testts (3 rows) --move all indexes of the table to pg_default \! pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --tablespace=pg_default INFO: repacking indexes of "testts1" INFO: repacking index "public.testts1_partial_idx" INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- (0 rows) --move one index to a tablespace \! pg_repack --dbname=contrib_regression --index=testts1_pkey --tablespace=testts INFO: repacking index "public.testts1_pkey" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname --------------+--------- testts1_pkey | testts (1 row) --index tablespace stays as is \! pg_repack --dbname=contrib_regression --index=testts1_pkey INFO: repacking index "public.testts1_pkey" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname --------------+--------- testts1_pkey | testts (1 row) --move index to pg_default \! pg_repack --dbname=contrib_regression --index=testts1_pkey --tablespace=pg_default INFO: repacking index "public.testts1_pkey" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- (0 rows) --using multiple --index option \! pg_repack --dbname=contrib_regression --index=testts1_pkey --index=testts1_with_idx --tablespace=testts INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ------------------+--------- testts1_pkey | testts testts1_with_idx | testts (2 rows) --using --indexes-only and --index option together \! 
pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --index=testts1_pkey ERROR: cannot specify --index (-i) and --table (-t) pg_repack-ver_1.5.0/regress/expected/tablespace_3.out000066400000000000000000000230771452746007700227020ustar00rootroot00000000000000SET client_min_messages = warning; -- -- Tablespace features tests -- -- Note: in order to pass this test you must create a tablespace called 'testts' -- SELECT spcname FROM pg_tablespace WHERE spcname = 'testts'; spcname --------- testts (1 row) -- If the query above failed you must create the 'testts' tablespace; CREATE TABLE testts1 (id serial primary key, data text); CREATE INDEX testts1_partial_idx on testts1 (id) where (id > 0); CREATE INDEX testts1_with_idx on testts1 (id) with (fillfactor=80); INSERT INTO testts1 (data) values ('a'); INSERT INTO testts1 (data) values ('b'); INSERT INTO testts1 (data) values ('c'); -- check the indexes definitions SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, NULL, false), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace ---------------------------------------------------------------------------------- CREATE INDEX index_OID ON repack.table_OID USING btree (id) WHERE (id > 0) CREATE UNIQUE INDEX index_OID ON repack.table_OID USING btree (id) CREATE INDEX index_OID ON repack.table_OID USING btree (id) WITH (fillfactor=80) (3 rows) SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo', false), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace ------------------------------------------------------------------------------------------------- CREATE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE foo WHERE (id > 0) CREATE UNIQUE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE foo CREATE INDEX index_OID ON repack.table_OID USING btree (id) WITH (fillfactor=80) TABLESPACE foo (3 rows) SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, NULL, true), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace -------------------------------------------------------------------------------------- CREATE INDEX CONCURRENTLY index_OID ON testts1 USING btree (id) WHERE (id > 0) CREATE UNIQUE INDEX CONCURRENTLY index_OID ON testts1 USING btree (id) CREATE INDEX CONCURRENTLY index_OID ON testts1 USING btree (id) WITH (fillfactor=80) (3 rows) SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo', true), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace ----------------------------------------------------------------------------------------------------- CREATE INDEX CONCURRENTLY index_OID ON testts1 USING btree (id) TABLESPACE foo WHERE (id > 0) CREATE UNIQUE INDEX CONCURRENTLY index_OID ON testts1 USING btree (id) TABLESPACE foo CREATE INDEX CONCURRENTLY index_OID ON testts1 USING btree (id) WITH (fillfactor=80) TABLESPACE foo (3 rows) -- Test that a tablespace is quoted as an identifier SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo bar', false), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE 
indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace --------------------------------------------------------------------------------------------------------- CREATE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE "foo bar" WHERE (id > 0) CREATE UNIQUE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE "foo bar" CREATE INDEX index_OID ON repack.table_OID USING btree (id) WITH (fillfactor='80') TABLESPACE "foo bar" (3 rows) -- can move the tablespace from default \! pg_repack --dbname=contrib_regression --no-order --table=testts1 --tablespace testts INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- testts1 | testts (1 row) SELECT * from testts1 order by id; id | data ----+------ 1 | a 2 | b 3 | c (3 rows) -- tablespace stays where it is \! pg_repack --dbname=contrib_regression --no-order --table=testts1 INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- testts1 | testts (1 row) -- can move the ts back to default \! pg_repack --dbname=contrib_regression --no-order --table=testts1 -s pg_default INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- (0 rows) -- can move the table together with the indexes \! pg_repack --dbname=contrib_regression --no-order --table=testts1 --tablespace testts --moveidx INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------------------+--------- testts1 | testts testts1_partial_idx | testts testts1_pkey | testts testts1_with_idx | testts (4 rows) -- can't specify --moveidx without --tablespace \! pg_repack --dbname=contrib_regression --no-order --table=testts1 --moveidx ERROR: cannot specify --moveidx (-S) without --tablespace (-s) \! pg_repack --dbname=contrib_regression --no-order --table=testts1 -S ERROR: cannot specify --moveidx (-S) without --tablespace (-s) -- not broken with order \! pg_repack --dbname=contrib_regression -o id --table=testts1 --tablespace pg_default --moveidx INFO: repacking table "public.testts1" --move all indexes of the table to a tablespace \! pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --tablespace=testts INFO: repacking indexes of "testts1" INFO: repacking index "public.testts1_partial_idx" INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------------------+--------- testts1_partial_idx | testts testts1_pkey | testts testts1_with_idx | testts (3 rows) --all indexes of tablespace remain in same tablespace \! 
pg_repack --dbname=contrib_regression --table=testts1 --only-indexes INFO: repacking indexes of "testts1" INFO: repacking index "public.testts1_partial_idx" INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------------------+--------- testts1_partial_idx | testts testts1_pkey | testts testts1_with_idx | testts (3 rows) --move all indexes of the table to pg_default \! pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --tablespace=pg_default INFO: repacking indexes of "testts1" INFO: repacking index "public.testts1_partial_idx" INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- (0 rows) --move one index to a tablespace \! pg_repack --dbname=contrib_regression --index=testts1_pkey --tablespace=testts INFO: repacking index "public.testts1_pkey" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname --------------+--------- testts1_pkey | testts (1 row) --index tablespace stays as is \! pg_repack --dbname=contrib_regression --index=testts1_pkey INFO: repacking index "public.testts1_pkey" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname --------------+--------- testts1_pkey | testts (1 row) --move index to pg_default \! pg_repack --dbname=contrib_regression --index=testts1_pkey --tablespace=pg_default INFO: repacking index "public.testts1_pkey" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- (0 rows) --using multiple --index option \! pg_repack --dbname=contrib_regression --index=testts1_pkey --index=testts1_with_idx --tablespace=testts INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ------------------+--------- testts1_pkey | testts testts1_with_idx | testts (2 rows) --using --indexes-only and --index option together \! 
pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --index=testts1_pkey ERROR: cannot specify --index (-i) and --table (-t) pg_repack-ver_1.5.0/regress/expected/tablespace_4.out000066400000000000000000000232351452746007700226770ustar00rootroot00000000000000SET client_min_messages = warning; -- -- Tablespace features tests -- -- Note: in order to pass this test you must create a tablespace called 'testts' -- SELECT spcname FROM pg_tablespace WHERE spcname = 'testts'; spcname --------- testts (1 row) -- If the query above failed you must create the 'testts' tablespace; CREATE TABLE testts1 (id serial primary key, data text); CREATE INDEX testts1_partial_idx on testts1 (id) where (id > 0); CREATE INDEX testts1_with_idx on testts1 (id) with (fillfactor=80); INSERT INTO testts1 (data) values ('a'); INSERT INTO testts1 (data) values ('b'); INSERT INTO testts1 (data) values ('c'); -- check the indexes definitions SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, NULL, false), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace ------------------------------------------------------------------------------------ CREATE INDEX index_OID ON repack.table_OID USING btree (id) WHERE (id > 0) CREATE UNIQUE INDEX index_OID ON repack.table_OID USING btree (id) CREATE INDEX index_OID ON repack.table_OID USING btree (id) WITH (fillfactor='80') (3 rows) SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo', false), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace --------------------------------------------------------------------------------------------------- CREATE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE foo WHERE (id > 0) CREATE UNIQUE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE foo CREATE INDEX index_OID ON repack.table_OID USING btree (id) WITH (fillfactor='80') TABLESPACE foo (3 rows) SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, NULL, true), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace ----------------------------------------------------------------------------------------------- CREATE INDEX CONCURRENTLY index_OID ON public.testts1 USING btree (id) WHERE (id > 0) CREATE UNIQUE INDEX CONCURRENTLY index_OID ON public.testts1 USING btree (id) CREATE INDEX CONCURRENTLY index_OID ON public.testts1 USING btree (id) WITH (fillfactor='80') (3 rows) SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo', true), '_[0-9]+', '_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace -------------------------------------------------------------------------------------------------------------- CREATE INDEX CONCURRENTLY index_OID ON public.testts1 USING btree (id) TABLESPACE foo WHERE (id > 0) CREATE UNIQUE INDEX CONCURRENTLY index_OID ON public.testts1 USING btree (id) TABLESPACE foo CREATE INDEX CONCURRENTLY index_OID ON public.testts1 USING btree (id) WITH (fillfactor='80') TABLESPACE foo (3 rows) -- Test that a tablespace is quoted as an identifier SELECT regexp_replace( repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo bar', false), '_[0-9]+', 
'_OID', 'g') FROM pg_index i join pg_class c ON c.oid = indexrelid WHERE indrelid = 'testts1'::regclass ORDER BY relname; regexp_replace --------------------------------------------------------------------------------------------------------- CREATE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE "foo bar" WHERE (id > 0) CREATE UNIQUE INDEX index_OID ON repack.table_OID USING btree (id) TABLESPACE "foo bar" CREATE INDEX index_OID ON repack.table_OID USING btree (id) WITH (fillfactor='80') TABLESPACE "foo bar" (3 rows) -- can move the tablespace from default \! pg_repack --dbname=contrib_regression --no-order --table=testts1 --tablespace testts INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- testts1 | testts (1 row) SELECT * from testts1 order by id; id | data ----+------ 1 | a 2 | b 3 | c (3 rows) -- tablespace stays where it is \! pg_repack --dbname=contrib_regression --no-order --table=testts1 INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- testts1 | testts (1 row) -- can move the ts back to default \! pg_repack --dbname=contrib_regression --no-order --table=testts1 -s pg_default INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- (0 rows) -- can move the table together with the indexes \! pg_repack --dbname=contrib_regression --no-order --table=testts1 --tablespace testts --moveidx INFO: repacking table "public.testts1" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------------------+--------- testts1 | testts testts1_partial_idx | testts testts1_pkey | testts testts1_with_idx | testts (4 rows) -- can't specify --moveidx without --tablespace \! pg_repack --dbname=contrib_regression --no-order --table=testts1 --moveidx ERROR: cannot specify --moveidx (-S) without --tablespace (-s) \! pg_repack --dbname=contrib_regression --no-order --table=testts1 -S ERROR: cannot specify --moveidx (-S) without --tablespace (-s) -- not broken with order \! pg_repack --dbname=contrib_regression -o id --table=testts1 --tablespace pg_default --moveidx INFO: repacking table "public.testts1" --move all indexes of the table to a tablespace \! pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --tablespace=testts INFO: repacking indexes of "testts1" INFO: repacking index "public.testts1_partial_idx" INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------------------+--------- testts1_partial_idx | testts testts1_pkey | testts testts1_with_idx | testts (3 rows) --all indexes of tablespace remain in same tablespace \! 
pg_repack --dbname=contrib_regression --table=testts1 --only-indexes INFO: repacking indexes of "testts1" INFO: repacking index "public.testts1_partial_idx" INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------------------+--------- testts1_partial_idx | testts testts1_pkey | testts testts1_with_idx | testts (3 rows) --move all indexes of the table to pg_default \! pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --tablespace=pg_default INFO: repacking indexes of "testts1" INFO: repacking index "public.testts1_partial_idx" INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- (0 rows) --move one index to a tablespace \! pg_repack --dbname=contrib_regression --index=testts1_pkey --tablespace=testts INFO: repacking index "public.testts1_pkey" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname --------------+--------- testts1_pkey | testts (1 row) --index tablespace stays as is \! pg_repack --dbname=contrib_regression --index=testts1_pkey INFO: repacking index "public.testts1_pkey" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname --------------+--------- testts1_pkey | testts (1 row) --move index to pg_default \! pg_repack --dbname=contrib_regression --index=testts1_pkey --tablespace=pg_default INFO: repacking index "public.testts1_pkey" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ---------+--------- (0 rows) --using multiple --index option \! pg_repack --dbname=contrib_regression --index=testts1_pkey --index=testts1_with_idx --tablespace=testts INFO: repacking index "public.testts1_pkey" INFO: repacking index "public.testts1_with_idx" SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname; relname | spcname ------------------+--------- testts1_pkey | testts testts1_with_idx | testts (2 rows) --using --indexes-only and --index option together \! 
pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --index=testts1_pkey ERROR: cannot specify --index (-i) and --table (-t) pg_repack-ver_1.5.0/regress/expected/trigger.out000066400000000000000000000025371452746007700220160ustar00rootroot00000000000000-- -- repack.repack_trigger tests -- CREATE TABLE trigger_t1 (a int, b int, primary key (a, b)); CREATE INDEX trigger_t1_idx ON trigger_t1 (a, b); SELECT create_trigger FROM repack.tables WHERE relname = 'public.trigger_t1'; create_trigger ---------------------------------------------------------------------------------------------------------------------------------------------------- CREATE TRIGGER repack_trigger AFTER INSERT OR DELETE OR UPDATE ON public.trigger_t1 FOR EACH ROW EXECUTE PROCEDURE repack.repack_trigger('a', 'b') (1 row) SELECT oid AS t1_oid FROM pg_catalog.pg_class WHERE relname = 'trigger_t1' \gset CREATE TYPE repack.pk_:t1_oid AS (a integer, b integer); CREATE TABLE repack.log_:t1_oid (id bigserial PRIMARY KEY, pk repack.pk_:t1_oid, row public.trigger_t1); CREATE TRIGGER repack_trigger AFTER INSERT OR DELETE OR UPDATE ON trigger_t1 FOR EACH ROW EXECUTE PROCEDURE repack.repack_trigger('a', 'b'); INSERT INTO trigger_t1 VALUES (111, 222); UPDATE trigger_t1 SET a=333, b=444 WHERE a = 111; DELETE FROM trigger_t1 WHERE a = 333; SELECT * FROM repack.log_:t1_oid; id | pk | row ----+-----------+----------- 1 | | (111,222) 2 | (111,222) | (333,444) 3 | (333,444) | (3 rows) pg_repack-ver_1.5.0/regress/sql/000077500000000000000000000000001452746007700166115ustar00rootroot00000000000000pg_repack-ver_1.5.0/regress/sql/after-schema.sql000066400000000000000000000002611452746007700216700ustar00rootroot00000000000000-- -- tables schema after running repack -- \d tbl_cluster \d tbl_gistkey \d tbl_only_ckey \d tbl_only_pkey \d tbl_with_dropped_column \d tbl_with_dropped_toast \d tbl_idxopts pg_repack-ver_1.5.0/regress/sql/error-on-invalid-idx.sql000066400000000000000000000004031452746007700233000ustar00rootroot00000000000000-- -- do repack -- \! pg_repack --dbname=contrib_regression --table=tbl_cluster --error-on-invalid-index \! pg_repack --dbname=contrib_regression --table=tbl_badindex --error-on-invalid-index \! pg_repack --dbname=contrib_regression --error-on-invalid-index pg_repack-ver_1.5.0/regress/sql/get_order_by.sql000066400000000000000000000032751452746007700220050ustar00rootroot00000000000000-- -- pg_repack issue #3 -- CREATE TABLE issue3_1 (col1 int NOT NULL, col2 text NOT NULL); CREATE UNIQUE INDEX issue3_1_idx ON issue3_1 (col1, col2 DESC); SELECT repack.get_order_by('issue3_1_idx'::regclass::oid, 'issue3_1'::regclass::oid); \! pg_repack --dbname=contrib_regression --table=issue3_1 CREATE TABLE issue3_2 (col1 int NOT NULL, col2 text NOT NULL); CREATE UNIQUE INDEX issue3_2_idx ON issue3_2 (col1 DESC, col2 text_pattern_ops); SELECT repack.get_order_by('issue3_2_idx'::regclass::oid, 'issue3_2'::regclass::oid); \! pg_repack --dbname=contrib_regression --table=issue3_2 CREATE TABLE issue3_3 (col1 int NOT NULL, col2 text NOT NULL); CREATE UNIQUE INDEX issue3_3_idx ON issue3_3 (col1 DESC, col2 DESC); SELECT repack.get_order_by('issue3_3_idx'::regclass::oid, 'issue3_3'::regclass::oid); \! pg_repack --dbname=contrib_regression --table=issue3_3 CREATE TABLE issue3_4 (col1 int NOT NULL, col2 text NOT NULL); CREATE UNIQUE INDEX issue3_4_idx ON issue3_4 (col1 NULLS FIRST, col2 text_pattern_ops DESC NULLS LAST); SELECT repack.get_order_by('issue3_4_idx'::regclass::oid, 'issue3_4'::regclass::oid); \! 
pg_repack --dbname=contrib_regression --table=issue3_4 CREATE TABLE issue3_5 (col1 int NOT NULL, col2 text NOT NULL); CREATE UNIQUE INDEX issue3_5_idx ON issue3_5 (col1 DESC NULLS FIRST, col2 COLLATE "POSIX" DESC); SELECT repack.get_order_by('issue3_5_idx'::regclass::oid, 'issue3_5'::regclass::oid); \! pg_repack --dbname=contrib_regression --table=issue3_5 -- -- pg_repack issue #321 -- CREATE TABLE issue321 (col1 int NOT NULL, col2 text NOT NULL); CREATE UNIQUE INDEX issue321_idx ON issue321 (col1); SELECT repack.get_order_by('issue321_idx'::regclass::oid, 1); SELECT repack.get_order_by(1, 1); pg_repack-ver_1.5.0/regress/sql/init-extension.sql000066400000000000000000000001321452746007700223030ustar00rootroot00000000000000SET client_min_messages = warning; CREATE EXTENSION pg_repack; RESET client_min_messages; pg_repack-ver_1.5.0/regress/sql/nosuper.sql000066400000000000000000000007421452746007700210300ustar00rootroot00000000000000-- -- no superuser check -- SET client_min_messages = error; DROP ROLE IF EXISTS nosuper; SET client_min_messages = warning; CREATE ROLE nosuper WITH LOGIN; -- => OK \! pg_repack --dbname=contrib_regression --table=tbl_cluster --no-superuser-check -- => ERROR \! pg_repack --dbname=contrib_regression --table=tbl_cluster --username=nosuper -- => ERROR \! pg_repack --dbname=contrib_regression --table=tbl_cluster --username=nosuper --no-superuser-check DROP ROLE IF EXISTS nosuper; pg_repack-ver_1.5.0/regress/sql/repack-check.sql000066400000000000000000000146321452746007700216600ustar00rootroot00000000000000SET client_min_messages = warning; SELECT col1, to_char("time", 'YYYY-MM-DD HH24:MI:SS'), ","")" FROM tbl_cluster ORDER BY 1, 2; SELECT * FROM tbl_only_ckey ORDER BY 1; SELECT * FROM tbl_only_pkey ORDER BY 1; SELECT * FROM tbl_gistkey ORDER BY 1; SET enable_seqscan = on; SET enable_indexscan = off; SELECT * FROM tbl_with_dropped_column ; SELECT * FROM view_for_dropped_column ORDER BY 1, 2; SELECT * FROM tbl_with_dropped_toast; SET enable_seqscan = off; SET enable_indexscan = on; SELECT * FROM tbl_with_dropped_column ORDER BY 1, 2; SELECT * FROM view_for_dropped_column; SELECT * FROM tbl_with_dropped_toast; RESET enable_seqscan; RESET enable_indexscan; -- check if storage option for both table and TOAST table didn't go away. 
-- check that the storage options for both the table and its TOAST table were preserved.
SELECT CASE relkind WHEN 'r' THEN relname WHEN 't' THEN 'toast_table' END as table, reloptions
FROM pg_class
WHERE relname = 'tbl_with_toast' OR relname = 'pg_toast_' || 'tbl_with_toast'::regclass::oid
ORDER BY 1;
SELECT pg_relation_size(reltoastrelid) = 0 as check_toast_rel_size FROM pg_class WHERE relname = 'tbl_with_mod_column_storage';
--
-- check broken links or orphan toast relations
--
SELECT oid, relname
FROM pg_class
WHERE relkind = 't'
  AND oid NOT IN (SELECT reltoastrelid FROM pg_class WHERE relkind = 'r');
SELECT oid, relname
FROM pg_class
WHERE relkind = 'r'
  AND reltoastrelid <> 0
  AND reltoastrelid NOT IN (SELECT oid FROM pg_class WHERE relkind = 't');
-- check column options
SELECT attname, attstattarget, attoptions FROM pg_attribute WHERE attrelid = 'tbl_idxopts'::regclass AND attnum > 0 ORDER BY attnum;
--
-- NOT NULL UNIQUE
--
CREATE TABLE tbl_nn (col1 int NOT NULL, col2 int NOT NULL);
CREATE TABLE tbl_uk (col1 int NOT NULL, col2 int, UNIQUE(col1, col2));
CREATE TABLE tbl_nn_uk (col1 int NOT NULL, col2 int NOT NULL, UNIQUE(col1, col2));
CREATE TABLE tbl_pk_uk (col1 int NOT NULL, col2 int NOT NULL, PRIMARY KEY(col1, col2), UNIQUE(col2, col1));
CREATE TABLE tbl_nn_puk (col1 int NOT NULL, col2 int NOT NULL);
CREATE UNIQUE INDEX tbl_nn_puk_pcol1_idx ON tbl_nn_puk(col1) WHERE col1 < 10;
\! pg_repack --dbname=contrib_regression --table=tbl_nn
-- => WARNING
\! pg_repack --dbname=contrib_regression --table=tbl_uk
-- => WARNING
\! pg_repack --dbname=contrib_regression --table=tbl_nn_uk
-- => OK
\! pg_repack --dbname=contrib_regression --table=tbl_pk_uk
-- => OK
\! pg_repack --dbname=contrib_regression --table=tbl_pk_uk --only-indexes
-- => OK
\! pg_repack --dbname=contrib_regression --table=tbl_nn_puk
-- => WARNING
--
-- Trigger handling
--
CREATE FUNCTION trgtest() RETURNS trigger AS $$BEGIN RETURN NEW; END$$ LANGUAGE plpgsql;
CREATE TABLE trg1 (id integer PRIMARY KEY);
CREATE TRIGGER repack_trigger_1 AFTER UPDATE ON trg1 FOR EACH ROW EXECUTE PROCEDURE trgtest();
\! pg_repack --dbname=contrib_regression --table=trg1
CREATE TABLE trg2 (id integer PRIMARY KEY);
CREATE TRIGGER repack_trigger AFTER UPDATE ON trg2 FOR EACH ROW EXECUTE PROCEDURE trgtest();
\! pg_repack --dbname=contrib_regression --table=trg2
CREATE TABLE trg3 (id integer PRIMARY KEY);
CREATE TRIGGER repack_trigger_1 BEFORE UPDATE ON trg3 FOR EACH ROW EXECUTE PROCEDURE trgtest();
\! pg_repack --dbname=contrib_regression --table=trg3
--
-- Table re-organization using specific column
--
-- reorganize table using cluster key. Sort in ascending order.
\! pg_repack --dbname=contrib_regression --table=tbl_order
SELECT ctid, c FROM tbl_order WHERE ctid <= '(0,10)';
-- reorganize table using specific column order. Sort in descending order.
\! pg_repack --dbname=contrib_regression --table=tbl_order -o "c DESC"
SELECT ctid, c FROM tbl_order WHERE ctid <= '(0,10)';
--
-- Dry run
--
\! pg_repack --dbname=contrib_regression --table=tbl_cluster --dry-run
--
-- Test --schema
--
CREATE SCHEMA test_schema1;
CREATE TABLE test_schema1.tbl1 (id INTEGER PRIMARY KEY);
CREATE TABLE test_schema1.tbl2 (id INTEGER PRIMARY KEY);
CREATE SCHEMA test_schema2;
CREATE TABLE test_schema2.tbl1 (id INTEGER PRIMARY KEY);
CREATE TABLE test_schema2.tbl2 (id INTEGER PRIMARY KEY);
-- => OK
\! pg_repack --dbname=contrib_regression --schema=test_schema1
-- => OK
\! pg_repack --dbname=contrib_regression --schema=test_schema1 --schema=test_schema2
-- => ERROR
\! pg_repack --dbname=contrib_regression --schema=test_schema1 --table=tbl1
-- => ERROR
\! pg_repack --dbname=contrib_regression --all --schema=test_schema1
--
-- don't kill backend
--
\! pg_repack --dbname=contrib_regression --table=tbl_cluster --no-kill-backend
--
-- exclude extension check
--
CREATE SCHEMA exclude_extension_schema;
CREATE TABLE exclude_extension_schema.tbl(val integer primary key);
-- => ERROR
\! pg_repack --dbname=contrib_regression --table=dummy_table --exclude-extension=dummy_extension
-- => ERROR
\! pg_repack --dbname=contrib_regression --table=dummy_table --exclude-extension=dummy_extension -x
-- => ERROR
\! pg_repack --dbname=contrib_regression --index=dummy_index --exclude-extension=dummy_extension
-- => OK
\! pg_repack --dbname=contrib_regression --schema=exclude_extension_schema --exclude-extension=dummy_extension
-- => OK
\! pg_repack --dbname=contrib_regression --schema=exclude_extension_schema --exclude-extension=dummy_extension --exclude-extension=dummy_extension
--
-- table inheritance check
--
CREATE TABLE parent_a(val integer primary key);
CREATE TABLE child_a_1(val integer primary key) INHERITS(parent_a);
CREATE TABLE child_a_2(val integer primary key) INHERITS(parent_a);
CREATE TABLE parent_b(val integer primary key);
CREATE TABLE child_b_1(val integer primary key) INHERITS(parent_b);
CREATE TABLE child_b_2(val integer primary key) INHERITS(parent_b);
-- => ERROR
\! pg_repack --dbname=contrib_regression --parent-table=dummy_table
-- => ERROR
\! pg_repack --dbname=contrib_regression --parent-table=dummy_index --index=dummy_index
-- => ERROR
\! pg_repack --dbname=contrib_regression --parent-table=dummy_table --schema=dummy_schema
-- => ERROR
\! pg_repack --dbname=contrib_regression --parent-table=dummy_table --all
-- => OK
\! pg_repack --dbname=contrib_regression --table=parent_a --parent-table=parent_b
-- => OK
\! pg_repack --dbname=contrib_regression --parent-table=parent_a --parent-table=parent_b
-- => OK
\! pg_repack --dbname=contrib_regression --table=parent_a --parent-table=parent_b --only-indexes
-- => OK
\! pg_repack --dbname=contrib_regression --parent-table=parent_a --parent-table=parent_b --only-indexes
--
-- Switch threshold
--
\! pg_repack --dbname=contrib_regression --table=tbl_cluster --switch-threshold 200
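-- A note on the option exercised above (a sketch; see the pg_repack docs for
-- the authoritative definition): --switch-threshold N makes pg_repack perform
-- the final table switch once fewer than N changes remain queued in the log
-- table, instead of trying to drain the log completely first, e.g.:
--   \! pg_repack --dbname=contrib_regression --table=tbl_cluster --switch-threshold 100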
pg_repack-ver_1.5.0/regress/sql/repack-run.sql000066400000000000000000000002701452746007700214010ustar00rootroot00000000000000
--
-- do repack
--
\! pg_repack --dbname=contrib_regression --table=tbl_cluster
\! pg_repack --dbname=contrib_regression --table=tbl_badindex
\! pg_repack --dbname=contrib_regression
pg_repack-ver_1.5.0/regress/sql/repack-setup.sql000066400000000000000000000112271452746007700217400ustar00rootroot00000000000000
SET client_min_messages = warning;
--
-- create table.
--
CREATE TABLE tbl_cluster (
    col1 int,
    "time" timestamp,
    ","")" text,
    PRIMARY KEY (","")", col1) WITH (fillfactor = 75)
) WITH (fillfactor = 70);
CREATE INDEX ","") cluster" ON tbl_cluster ("time", length(","")"), ","")" text_pattern_ops) WITH (fillfactor = 75);
ALTER TABLE tbl_cluster CLUSTER ON ","") cluster";
CREATE TABLE tbl_only_pkey (
    col1 int PRIMARY KEY,
    ","")" text
);
CREATE TABLE tbl_only_ckey (
    col1 int,
    col2 timestamp,
    ","")" text
) WITH (fillfactor = 70);
CREATE INDEX cidx_only_ckey ON tbl_only_ckey (col2, ","")");
ALTER TABLE tbl_only_ckey CLUSTER ON cidx_only_ckey;
CREATE TABLE tbl_gistkey (
    id integer PRIMARY KEY,
    c circle
);
CREATE INDEX cidx_circle ON tbl_gistkey USING gist (c);
ALTER TABLE tbl_gistkey CLUSTER ON cidx_circle;
CREATE TABLE tbl_with_dropped_column (
    d1 text,
    c1 text,
    id integer PRIMARY KEY,
    d2 text,
    c2 text,
    d3 text
);
ALTER INDEX tbl_with_dropped_column_pkey SET (fillfactor = 75);
ALTER TABLE tbl_with_dropped_column CLUSTER ON tbl_with_dropped_column_pkey;
CREATE INDEX idx_c1c2 ON tbl_with_dropped_column (c1, c2) WITH (fillfactor = 75);
CREATE INDEX idx_c2c1 ON tbl_with_dropped_column (c2, c1);
CREATE TABLE tbl_with_dropped_toast (
    i integer,
    j integer,
    t text,
    PRIMARY KEY (i, j)
);
ALTER TABLE tbl_with_dropped_toast CLUSTER ON tbl_with_dropped_toast_pkey;
CREATE TABLE tbl_badindex (
    id integer PRIMARY KEY,
    n integer
);
CREATE TABLE tbl_idxopts (
    i integer PRIMARY KEY,
    t text
);
CREATE INDEX idxopts_t ON tbl_idxopts (t DESC NULLS LAST) WHERE (t != 'aaa');
-- Use this table to play with attribute options too
ALTER TABLE tbl_idxopts ALTER i SET STATISTICS 1;
ALTER TABLE tbl_idxopts ALTER t SET (n_distinct = -0.5);
CREATE TABLE tbl_with_toast (
    i integer PRIMARY KEY,
    c text
);
ALTER TABLE tbl_with_toast SET (AUTOVACUUM_VACUUM_SCALE_FACTOR = 30, AUTOVACUUM_VACUUM_THRESHOLD = 300);
ALTER TABLE tbl_with_toast SET (TOAST.AUTOVACUUM_VACUUM_SCALE_FACTOR = 40, TOAST.AUTOVACUUM_VACUUM_THRESHOLD = 400);
CREATE TABLE tbl_with_mod_column_storage (
    id integer PRIMARY KEY,
    c text
);
ALTER TABLE tbl_with_mod_column_storage ALTER c SET STORAGE MAIN;
CREATE TABLE tbl_order (c int primary key);
CREATE TABLE tbl_storage_plain (c1 int primary key, c2 text);
ALTER TABLE tbl_storage_plain ALTER COLUMN c1 SET STORAGE PLAIN;
ALTER TABLE tbl_storage_plain ALTER COLUMN c2 SET STORAGE PLAIN;
--
-- insert data
--
INSERT INTO tbl_cluster VALUES(1, '2008-12-31 10:00:00', 'admin');
INSERT INTO tbl_cluster VALUES(2, '2008-01-01 00:00:00', 'king');
INSERT INTO tbl_cluster VALUES(3, '2008-03-04 12:00:00', 'joker');
INSERT INTO tbl_cluster VALUES(4, '2008-03-05 15:00:00', 'queen');
INSERT INTO tbl_cluster VALUES(5, '2008-01-01 00:30:00', sqrt(2::numeric(1000,999))::text || sqrt(3::numeric(1000,999))::text);
INSERT INTO tbl_only_pkey VALUES(1, 'abc');
INSERT INTO tbl_only_pkey VALUES(2, 'def');
INSERT INTO tbl_only_ckey VALUES(1, '2008-01-01 00:00:00', 'abc');
INSERT INTO tbl_only_ckey VALUES(2, '2008-02-01 00:00:00', 'def');
INSERT INTO tbl_gistkey VALUES(1, '<(1,2),3>');
INSERT INTO tbl_gistkey VALUES(2, '<(4,5),6>');
INSERT INTO tbl_with_dropped_column VALUES('d1', 'c1', 2, 'd2', 'c2', 'd3');
INSERT INTO tbl_with_dropped_column VALUES('d1', 'c1', 1, 'd2', 'c2', 'd3');
ALTER TABLE tbl_with_dropped_column DROP COLUMN d1;
ALTER TABLE tbl_with_dropped_column DROP COLUMN d2;
ALTER TABLE tbl_with_dropped_column DROP COLUMN d3;
ALTER TABLE tbl_with_dropped_column ADD COLUMN c3 text;
CREATE VIEW view_for_dropped_column AS SELECT * FROM tbl_with_dropped_column;
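-- A dropped column is only marked attisdropped in pg_attribute, so the repack
-- must rebuild the table (and keep the dependent view working) without
-- resurrecting d1/d2/d3; the "before" SELECTs below and the checks in
-- repack-check.sql compare the visible data around the repack run. One could
-- eyeball the dropped entries with something like:
--   SELECT attname, attisdropped FROM pg_attribute
--   WHERE attrelid = 'tbl_with_dropped_column'::regclass AND attnum > 0;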
INSERT INTO tbl_with_dropped_toast VALUES(1, 10, 'abc');
INSERT INTO tbl_with_dropped_toast VALUES(2, 20, sqrt(2::numeric(1000,999))::text || sqrt(3::numeric(1000,999))::text);
ALTER TABLE tbl_with_dropped_toast DROP COLUMN t;
INSERT INTO tbl_badindex VALUES(1, 10);
INSERT INTO tbl_badindex VALUES(2, 10);
-- insert data large enough that it is always stored in the TOAST table when the column storage is extended.
SELECT setseed(0);
INSERT INTO tbl_with_mod_column_storage SELECT 1, array_to_string(ARRAY(SELECT chr((random() * (127 - 32) + 32)::int) FROM generate_series(1, 3 * 1024) code), '');
-- This will fail, leaving an invalid index behind
\set VERBOSITY terse
CREATE UNIQUE INDEX CONCURRENTLY idx_badindex_n ON tbl_badindex (n);
INSERT INTO tbl_idxopts VALUES (0, 'abc'), (1, 'aaa'), (2, NULL), (3, 'bbb');
-- Insert unordered data
INSERT INTO tbl_order SELECT generate_series(100, 51, -1);
CLUSTER tbl_order USING tbl_order_pkey;
INSERT INTO tbl_order SELECT generate_series(50, 1, -1);
--
-- before
--
SELECT * FROM tbl_with_dropped_column;
SELECT * FROM view_for_dropped_column;
SELECT * FROM tbl_with_dropped_toast;
VACUUM FULL tbl_storage_plain;
pg_repack-ver_1.5.0/regress/sql/tablespace.sql000066400000000000000000000121061452746007700214350ustar00rootroot00000000000000
SET client_min_messages = warning;
--
-- Tablespace features tests
--
-- Note: in order to pass this test you must create a tablespace called 'testts'
--
SELECT spcname FROM pg_tablespace WHERE spcname = 'testts';
-- If the query above returned no rows, create the 'testts' tablespace.
CREATE TABLE testts1 (id serial primary key, data text);
CREATE INDEX testts1_partial_idx on testts1 (id) where (id > 0);
CREATE INDEX testts1_with_idx on testts1 (id) with (fillfactor=80);
INSERT INTO testts1 (data) values ('a');
INSERT INTO testts1 (data) values ('b');
INSERT INTO testts1 (data) values ('c');
-- check the index definitions
SELECT regexp_replace(
    repack.repack_indexdef(indexrelid, 'testts1'::regclass, NULL, false),
    '_[0-9]+', '_OID', 'g')
FROM pg_index i join pg_class c ON c.oid = indexrelid
WHERE indrelid = 'testts1'::regclass ORDER BY relname;
SELECT regexp_replace(
    repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo', false),
    '_[0-9]+', '_OID', 'g')
FROM pg_index i join pg_class c ON c.oid = indexrelid
WHERE indrelid = 'testts1'::regclass ORDER BY relname;
SELECT regexp_replace(
    repack.repack_indexdef(indexrelid, 'testts1'::regclass, NULL, true),
    '_[0-9]+', '_OID', 'g')
FROM pg_index i join pg_class c ON c.oid = indexrelid
WHERE indrelid = 'testts1'::regclass ORDER BY relname;
SELECT regexp_replace(
    repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo', true),
    '_[0-9]+', '_OID', 'g')
FROM pg_index i join pg_class c ON c.oid = indexrelid
WHERE indrelid = 'testts1'::regclass ORDER BY relname;
-- Test that a tablespace is quoted as an identifier
SELECT regexp_replace(
    repack.repack_indexdef(indexrelid, 'testts1'::regclass, 'foo bar', false),
    '_[0-9]+', '_OID', 'g')
FROM pg_index i join pg_class c ON c.oid = indexrelid
WHERE indrelid = 'testts1'::regclass ORDER BY relname;
-- can move the tablespace from default
\! pg_repack --dbname=contrib_regression --no-order --table=testts1 --tablespace testts
SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname;
SELECT * from testts1 order by id;
-- tablespace stays where it is
\! pg_repack --dbname=contrib_regression --no-order --table=testts1
SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname;
-- can move the tablespace back to default
\! pg_repack --dbname=contrib_regression --no-order --table=testts1 -s pg_default
SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname;
-- can move the table together with the indexes
\! pg_repack --dbname=contrib_regression --no-order --table=testts1 --tablespace testts --moveidx
SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname;
-- can't specify --moveidx without --tablespace
\! pg_repack --dbname=contrib_regression --no-order --table=testts1 --moveidx
\! pg_repack --dbname=contrib_regression --no-order --table=testts1 -S
-- not broken with order
\! pg_repack --dbname=contrib_regression -o id --table=testts1 --tablespace pg_default --moveidx
-- move all indexes of the table to a tablespace
\! pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --tablespace=testts
SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname;
-- all indexes of the table remain in the same tablespace
\! pg_repack --dbname=contrib_regression --table=testts1 --only-indexes
SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname;
-- move all indexes of the table to pg_default
\! pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --tablespace=pg_default
SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname;
-- move one index to a tablespace
\! pg_repack --dbname=contrib_regression --index=testts1_pkey --tablespace=testts
SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname;
-- index tablespace stays as is
\! pg_repack --dbname=contrib_regression --index=testts1_pkey
SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname;
-- move index to pg_default
\! pg_repack --dbname=contrib_regression --index=testts1_pkey --tablespace=pg_default
SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname;
-- using multiple --index options
\! pg_repack --dbname=contrib_regression --index=testts1_pkey --index=testts1_with_idx --tablespace=testts
SELECT relname, spcname FROM pg_class JOIN pg_tablespace ts ON ts.oid = reltablespace WHERE relname ~ '^testts1' ORDER BY relname;
-- using --only-indexes and --index options together
\! pg_repack --dbname=contrib_regression --table=testts1 --only-indexes --index=testts1_pkey
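-- The invocation above is the one expected to fail: --index cannot be combined
-- with --table (and therefore not with --table --only-indexes either), which
-- the expected output records as:
--   ERROR: cannot specify --index (-i) and --table (-t)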
pg_repack-ver_1.5.0/regress/sql/trigger.sql000066400000000000000000000014161452746007700207770ustar00rootroot00000000000000
--
-- repack.repack_trigger tests
--
CREATE TABLE trigger_t1 (a int, b int, primary key (a, b));
CREATE INDEX trigger_t1_idx ON trigger_t1 (a, b);
SELECT create_trigger FROM repack.tables WHERE relname = 'public.trigger_t1';
SELECT oid AS t1_oid FROM pg_catalog.pg_class WHERE relname = 'trigger_t1' \gset
CREATE TYPE repack.pk_:t1_oid AS (a integer, b integer);
CREATE TABLE repack.log_:t1_oid (id bigserial PRIMARY KEY, pk repack.pk_:t1_oid, row public.trigger_t1);
CREATE TRIGGER repack_trigger AFTER INSERT OR DELETE OR UPDATE ON trigger_t1 FOR EACH ROW EXECUTE PROCEDURE repack.repack_trigger('a', 'b');
INSERT INTO trigger_t1 VALUES (111, 222);
UPDATE trigger_t1 SET a=333, b=444 WHERE a = 111;
DELETE FROM trigger_t1 WHERE a = 333;
SELECT * FROM repack.log_:t1_oid;
pg_repack-ver_1.5.0/regress/travis_prepare.sh000077500000000000000000000043741452746007700214060ustar00rootroot00000000000000
#!/bin/bash

set -e -x

export PACKAGE="$PGVER"
export PGDIR="/usr/lib/postgresql/$PACKAGE"
export CONFDIR="/etc/postgresql/$PACKAGE/main"
export DATADIR="/var/lib/postgresql/$PACKAGE/main"
export PGBIN="$PGDIR/bin"
export PATH="$PGBIN:$PATH"

# This also stops the server currently running on port 5432
sudo apt-get remove -y libpq5

# Match libpq and server-dev packages
# See https://github.com/reorg/pg_repack/issues/63
sudo sh -c 'echo "deb [arch=amd64] http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main ${PGVER}" > /etc/apt/sources.list.d/pgdg.list'

# Import the repository signing key:
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -

sudo apt-get update

# This might be a moving target, but it currently fails. 13 could start
# failing in the future instead.
# Some versions break if this is not specified (9.4 for sure, maybe 9.6)
if [[ "$PGVER" = "9.4" ]]; then
    sudo apt-get install -y "libpq5=${PGVER}*" "libpq-dev=${PGVER}*"
    sudo apt-mark hold libpq5
fi

# build dependency missed by postgresql-server-dev
if [[ "$PGVER" -ge "14" ]]; then
    sudo apt-get install -y liblz4-dev
fi

if ! sudo apt-get install -y \
    postgresql-$PGVER \
    postgresql-client-$PGVER \
    postgresql-server-dev-$PGVER
then
    sudo systemctl status postgresql.service -l
    exit 1
fi

# ensure PostgreSQL is running on 5432 port with proper auth
sudo sed -i \
    's/\(^local[[:space:]]\+all[[:space:]]\+all[[:space:]]\+\).*/\1trust/' \
    "$CONFDIR/pg_hba.conf"
sudo bash -c "echo 'port=5432' >> $CONFDIR/postgresql.conf"
sudo service postgresql restart $PGVER

# ensure the travis user exists. It may be missing if the database was not provided by Travis
userexists=`sudo -u postgres "$PGBIN/psql" -tc "select count(*) from pg_catalog.pg_user where usename='travis';"`
if [ ${userexists} -eq 0 ]; then
    sudo -u postgres "$PGBIN/psql" -c "create user travis superuser"
fi

# Go somewhere else or sudo will fail
cd /

# Already started because of installing postgresql-$PGVER
# sudo -u postgres "$PGBIN/pg_ctl" -w -l /dev/null -D "$CONFDIR" start
sudo -u postgres mkdir -p /var/lib/postgresql/testts
sudo -u postgres "$PGBIN/psql" \
    -c "create tablespace testts location '/var/lib/postgresql/testts'"

# Go back to the build dir
cd -
pg_repack-ver_1.5.0/regress/travis_test.sh000077500000000000000000000002011452746007700207120ustar00rootroot00000000000000
#!/bin/bash

set -x

export PG_REGRESS_DIFF_OPTS=-u
if ! make installcheck; then
    cat regress/regression.diffs
    exit 1
fi
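# A sketch (not part of the suite) of rerunning a single regression script by
# hand against the contrib_regression database that installcheck leaves behind;
# the paths assume this repository layout and a server on the default port:
#
#   psql -X -a -d contrib_regression -f regress/sql/get_order_by.sql \
#       > regress/results/get_order_by.out 2>&1
#   diff -u regress/expected/get_order_by.out regress/results/get_order_by.out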