pax_global_header00006660000000000000000000000064141514231700014510gustar00rootroot0000000000000052 comment=aef974d456254080bb51b2a12920be37a271982e pglogical-REL2_4_1/000077500000000000000000000000001415142317000141625ustar00rootroot00000000000000pglogical-REL2_4_1/.gitignore000066400000000000000000000003221415142317000161470ustar00rootroot00000000000000results regression_output tmp_check pglogical_create_subscriber .vimrc *.o *.so *.gcda *.gcno *~ pglogical.control pglogical-*.tar.bz2 pglogical-*.tar.bz2.md5 pglogical-*.tar.bz2.asc valgrind-*-*.log /postgres pglogical-REL2_4_1/.gitmodules000066400000000000000000000001511415142317000163340ustar00rootroot00000000000000[submodule "pglogical_dump"] path = pglogical_dump url = git@github.com:2ndQuadrant/pglogical_dump.git pglogical-REL2_4_1/COPYRIGHT000066400000000000000000000022501415142317000154540ustar00rootroot00000000000000PostgreSQL Database Management System (formerly known as Postgres, then as Postgres95) Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group Portions Copyright (c) 1994, The Regents of the University of California Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. pglogical-REL2_4_1/FAQ.md000066400000000000000000000051061415142317000151150ustar00rootroot00000000000000# Frequently Asked Questions pglogical 2.0 introduces some new features, like column filter, row filter and apply delay. Some related discussion on them: ### The column filter * What happens if we column filter on a table with OIDS? Can we filter on xmin? - For a table with OIDs, column filter works fine. No, we cannot filter system columns like oid, xmin. * What happens if a column being filtered on is dropped? - Currently in pglogical replication, even primary key can be dropped at provider. If a column being filtered on is dropped, at provider it is removed from the column filter too. This can be seen using `pglogical.show_repset_table_info()`. Columns at subscriber remain as is, which is correct and expected. At subscriber, in this state INSERTs replicate, but UPDATEs and DELETEs do not. * What happens if we add a column, does that automatically get included? - If a column is added at provider, it does not automatically get added to the column filter. ### The row filter * Can we create `row_filter` on table with OIDS? Can we filter on xmin? - Yes, `row_filter` works fine for table with OIDs. No, we cannot filter on system columns like xmin. * What types of function can we execute in a `row_filter`? Can we use a volatile sampling function, for example? - We can execute immutable, stable and volatile functions in a `row_filter`. Caution must be exercised with regard to writes as any expression which will do writes will throw error and stop replication. Volatile sampling function in `row_filter`: This would not work in practice as it would not get correct snapshot of the data in live system. Theoretically with static data, it works. 
* Can we test a JSONB datatype that includes some form of attribute filtering? - Yes, `row_filter` on attributes of JSONB datatype works fine. ### The apply delay * Does `apply_delay` include TimeZone changes, for example Daylight Savings Time? There is a similar mechanism in physical replication - `recovery_min_apply_delay`. However, if we set some interval, during the daylight savings times, we might get that interval + the time change in practice (ie instead of 1h delay you can get 2h delay because of that). This may lead to stopping and starting the database service twice per year. - Yes, `apply_delay` include TimeZone changes, for example Daylight Savings Time. Value of `apply_delay` stays the same in practice, if daylight savings time switch happens after subscription was created. However, we do not recommend running heavy workloads during switching time as pglogical replication needs some time ( ~ 5 minutes) to recover fine. pglogical-REL2_4_1/Makefile000066400000000000000000000237231415142317000156310ustar00rootroot00000000000000# contrib/pglogical/Makefile MODULE_big = pglogical EXTENSION = pglogical PGFILEDESC = "pglogical - logical replication" MODULES = pglogical_output DATA = pglogical--1.0.0.sql pglogical--1.0.0--1.0.1.sql \ pglogical--1.0.1--1.1.0.sql \ pglogical--1.1.0--1.1.1.sql pglogical--1.1.1--1.1.2.sql \ pglogical--1.1.2--1.2.0.sql \ pglogical--1.2.0--1.2.1.sql pglogical--1.2.1--1.2.2.sql \ pglogical--1.2.2--2.0.0.sql \ pglogical--2.0.0--2.0.1.sql \ pglogical--2.0.0--2.1.0.sql pglogical--2.0.1--2.1.0.sql \ pglogical--2.1.0--2.1.1.sql pglogical--2.1.1--2.2.0.sql \ pglogical--2.2.0.sql \ pglogical--2.2.0--2.2.1.sql pglogical--2.2.1.sql \ pglogical--2.2.1--2.2.2.sql pglogical--2.2.2.sql \ pglogical--2.2.2--2.3.0.sql \ pglogical--2.2.2--2.3.1.sql \ pglogical--2.3.0.sql \ pglogical--2.3.0--2.3.1.sql \ pglogical--2.3.1.sql \ pglogical--2.3.1--2.3.2.sql \ pglogical--2.3.2.sql \ pglogical--2.3.2--2.3.3.sql \ pglogical--2.3.3.sql \ 
pglogical--2.3.3--2.3.4.sql \ pglogical--2.3.4.sql \ pglogical--2.3.4--2.4.0.sql \ pglogical--2.4.0.sql \ pglogical--2.4.0--2.4.1.sql \ pglogical--2.4.1.sql OBJS = pglogical_apply.o pglogical_conflict.o pglogical_manager.o \ pglogical.o pglogical_node.o pglogical_relcache.o \ pglogical_repset.o pglogical_rpc.o pglogical_functions.o \ pglogical_queue.o pglogical_fe.o pglogical_worker.o \ pglogical_sync.o pglogical_sequences.o pglogical_executor.o \ pglogical_dependency.o pglogical_apply_heap.o pglogical_apply_spi.o \ pglogical_output_config.o pglogical_output_plugin.o \ pglogical_output_proto.o pglogical_proto_json.o \ pglogical_proto_native.o pglogical_monitoring.o SCRIPTS_built = pglogical_create_subscriber REGRESS = preseed infofuncs init_fail init preseed_check basic extended conflict_secondary_unique \ toasted replication_set add_table matview bidirectional primary_key \ interfaces foreign_key functions copy triggers parallel row_filter \ row_filter_sampling att_list column_filter apply_delay multiple_upstreams \ node_origin_cascade drop EXTRA_CLEAN += compat94/pglogical_compat.o compat95/pglogical_compat.o \ compat96/pglogical_compat.o compat10/pglogical_compat.o \ compat11/pglogical_compat.o compat11/pglogical_compat.bc \ compat12/pglogical_compat.o compat12/pglogical_compat.bc \ compat13/pglogical_compat.o compat13/pglogical_compat.bc \ compat14/pglogical_compat.o compat14/pglogical_compat.bc \ pglogical_create_subscriber.o # The # in #define is taken as a comment, per https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=142043 # so it must be escaped. The $ placeholders in awk must be doubled too. pglogical_version=$(shell awk '/\#define PGLOGICAL_VERSION[ \t]+\".*\"/ { print substr($$3,2,length($$3)-2) }' $(realpath $(srcdir)/pglogical.h) ) # For regression checks # http://www.postgresql.org/message-id/CAB7nPqTsR5o3g-fBi6jbsVdhfPiLFWQ_0cGU5=94Rv_8W3qvFA@mail.gmail.com # this makes "make check" give a useful error abs_top_builddir = . 
NO_TEMP_INSTALL = yes PG_CONFIG ?= pg_config PGVER := $(shell $(PG_CONFIG) --version | sed 's/[^0-9]//g' | cut -c 1-2) PG_CPPFLAGS += -I$(libpq_srcdir) -I$(realpath $(srcdir)/compat$(PGVER)) -Werror=implicit-function-declaration SHLIB_LINK += $(libpq) $(filter -lintl, $(LIBS)) OBJS += $(srcdir)/compat$(PGVER)/pglogical_compat.o ifeq ($(PGVER),94) DATA += compat94/pglogical_origin.control compat94/pglogical_origin--1.0.0.sql REGRESS = preseed infofuncs init preseed_check basic extended \ toasted replication_set add_table matview primary_key \ interfaces foreign_key functions copy triggers parallel \ att_list column_filter apply_delay multiple_upstreams \ node_origin_cascade drop REGRESS += --dbname=regression SCRIPTS_built += pglogical_dump/pglogical_dump SCRIPTS += pglogical_dump/pglogical_dump requires = requires=pglogical_origin control_path = $(abspath $(abs_top_builddir))/pglogical.control else DATA += pglogical_origin.control pglogical_origin--1.0.0.sql requires = control_path = $(abspath $(srcdir))/pglogical.control endif EXTRA_CLEAN += $(control_path) PGXS = $(shell $(PG_CONFIG) --pgxs) include $(PGXS) ifeq ($(PGVER),94) regresscheck: ; check: ; $(srcdir)/pglogical_dump/pg_dump.c: $(warning pglogical_dump empty, trying to fetch as submodule) git submodule init git submodule update pglogical_dump/pglogical_dump: pglogical_dump/pg_dump.c SUBDIRS += pglogical_dump else # We can't do a normal 'make check' because PGXS doesn't support # creating a temp install. We don't want to use a normal PGXS # 'installcheck' though, because it's a pain to set up a temp install # manually, with the config overrides needed. # # We compromise by using the install we're building against, installing # glogical into it, then making a temp instance. This means that 'check' # affects the target DB install. Nobody with any sense runs 'make check' # under a user with write permissions to their production PostgreSQL # install (right?) # But this is still not ideal. 
regresscheck: $(MKDIR_P) regression_output $(pg_regress_check) \ --temp-config ./regress-postgresql.conf \ --temp-instance=./tmp_check \ --outputdir=./regression_output \ --create-role=logical \ $(REGRESS) check: install regresscheck endif pglogical_create_subscriber: pglogical_create_subscriber.o pglogical_fe.o $(CC) $(CFLAGS) $^ $(LDFLAGS) $(LDFLAGS_EX) $(libpq_pgport) $(filter-out -lreadline, $(LIBS)) -o $@$(X) pglogical.control: pglogical.control.in pglogical.h sed 's/__PGLOGICAL_VERSION__/$(pglogical_version)/;s/__REQUIRES__/$(requires)/' $(realpath $(srcdir)/pglogical.control.in) > $(control_path) all: pglogical.control GITHASH=$(shell if [ -e .distgitrev ]; then cat .distgitrev; else git rev-parse --short HEAD; fi) dist-common: clean @if test "$(wanttag)" -eq 1 -a "`git name-rev --tags --name-only $(GITHASH)`" = "undefined"; then echo "cannot 'make dist' on untagged tree; tag it or use make git-dist"; exit 1; fi @rm -f .distgitrev .distgittag @if ! git diff-index --quiet HEAD; then echo >&2 "WARNING: git working tree has uncommitted changes to tracked files which were INCLUDED"; fi @if [ -n "`git ls-files --exclude-standard --others`" ]; then echo >&2 "WARNING: git working tree has unstaged files which were IGNORED!"; fi @echo $(GITHASH) > .distgitrev @git name-rev --tags --name-only `cat .distgitrev` > .distgittag @(git ls-tree -r -t --full-tree HEAD --name-only \ && cd pglogical_dump\ && git ls-tree -r -t --full-tree HEAD --name-only | sed 's/^/pglogical_dump\//'\ ) |\ tar cjf "${distdir}.tar.bz2" --transform="s|^|${distdir}/|" --no-recursion \ -T - .distgitrev .distgittag @echo >&2 "Prepared ${distdir}.tar.bz2 for rev=`cat .distgitrev`, tag=`cat .distgittag`" @rm -f .distgitrev .distgittag @md5sum "${distdir}.tar.bz2" > "${distdir}.tar.bz2.md5" @if test -n "$(GPGSIGNKEYS)"; then gpg -q -a -b $(shell for x in $(GPGSIGNKEYS); do echo -u $$x; done) "${distdir}.tar.bz2"; else echo "No GPGSIGNKEYS passed, not signing tarball. 
Pass space separated keyid list as make var to sign."; fi dist: distdir=pglogical-$(pglogical_version) dist: wanttag=1 dist: dist-common git-dist: distdir=pglogical-$(pglogical_version)_git$(GITHASH) git-dist: wanttag=0 git-dist: dist-common # runs TAP tests # PGXS doesn't support TAP tests yet. # Copy perl modules in postgresql_srcdir/src/test/perl # to postgresql_installdir/lib/pgxs/src/test/perl define prove_check rm -rf $(CURDIR)/tmp_check/log cd $(srcdir) && TESTDIR='$(CURDIR)' $(with_temp_install) PGPORT='6$(DEF_PGPORT)' PG_REGRESS='$(top_builddir)/src/test/regress/pg_regress' $(PROVE) --verbose $(PG_PROVE_FLAGS) $(PROVE_FLAGS) $(or $(PROVE_TESTS),t/*.pl) endef check_prove: $(prove_check) .PHONY: all check regresscheck pglogical.control define _pgl_create_recursive_target .PHONY: $(1)-$(2)-recurse $(1): $(1)-$(2)-recurse $(1)-$(2)-recurse: $(if $(filter check, $(3)), temp-install) $(MKDIR_P) $(2) $$(MAKE) -C $(2) -f $(abspath $(srcdir))/$(2)/Makefile VPATH=$(abspath $(srcdir))/$(2) $(3) endef $(foreach target,$(if $1,$1,$(standard_targets)),$(foreach subdir,$(if $2,$2,$(SUBDIRS)),$(eval $(call _pgl_create_recursive_target,$(target),$(subdir),$(if $3,$3,$(target)))))) # # The following hideous hack works around pg_regress's inability to inject # prefix commands by using a wrapper 'postgres' that finds the real postgres. # define VALGRIND_WRAPPER #!/bin/bash set -e -u -x # May also want --expensive-definedness-checks=yes # # Quicker runs without --track-origins=yes --read-var-info=yes # # If you don't want leak checking, use --leak-check=no # # When just doing leak checking and not looking for detailed memory error reports you don't need: # --track-origins=yes --read-var-info=yes --malloc-fill=8f --free-fill=9f # SUPP=$(POSTGRES_SRC)/src/tools/valgrind.supp # Pop top two elements from path; the first is added by pg_regress # and the next is us. 
function join_by { local IFS="$$1"; shift; echo "$$*"; } IFS=':' read -r -a PATHA <<< "$$PATH" export PATH=$$(join_by ":" "$${PATHA[@]:2}") NEXT_POSTGRES=$$(which postgres) if [ "$${NEXT_POSTGRES}" -ef "./valgrind/postgres" ]; then echo "ERROR: attempt to execute self" exit 1 fi echo "Running $${NEXT_POSTGRES} under Valgrind" valgrind --leak-check=full --show-leak-kinds=definite,possible,reachable --gen-suppressions=all \ --suppressions="$${SUPP}" --suppressions=`pwd`/pglogical.supp --verbose \ --time-stamp=yes --log-file=valgrind-$$$$-%p.log --trace-children=yes \ --track-origins=yes --read-var-info=yes --malloc-fill=8f --free-fill=9f \ --num-callers=30 \ postgres "$$@" endef export VALGRIND_WRAPPER valgrind-check: $(if $(POSTGRES_SRC),,$(error set Make variable POSTGRES_SRC to postgres source dir to find valgrind.supp)) $(if $(wildcard $(POSTGRES_SRC)/src/tools/valgrind.supp),,$(error missing valgrind suppressions at $(POSTGRES_SRC)/src/tools/valgrind.supp)) mkdir -p valgrind/ echo "$$VALGRIND_WRAPPER" > valgrind/postgres chmod a+x valgrind/postgres PATH=./valgrind/:$(PATH) $(MAKE) check rm valgrind/postgres pglogical-REL2_4_1/README.md000077700000000000000000000000001415142317000176432docs/README.mdustar00rootroot00000000000000pglogical-REL2_4_1/README.tests94000066400000000000000000000010421415142317000163550ustar00rootroot00000000000000On 9.4 you can't run 'make check' since we don't have any way to make a temp instance using PGXS. 
So you'll need to roll a 9.4 install yourself, with something like: export PATH=$HOME/pg/94/bin:$PATH export PGUSER=postgres [ -e 94test ] && pg_ctl -D 94test -m immediate -w stop rm -rf 94test initdb -D 94test -U postgres -A trust grep -v '^track_commit_timestamp' regress-postgresql.conf >> 94test/postgresql.conf cp regress-pg_hba.conf 94test/ PGPORT=5495 pg_ctl -D 94test -w -l 94test.log start PGPORT=5495 make -s clean install installcheck pglogical-REL2_4_1/compat10/000077500000000000000000000000001415142317000156065ustar00rootroot00000000000000pglogical-REL2_4_1/compat10/pglogical_compat.c000066400000000000000000000005721415142317000212620ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_compat.c * compatibility functions (mainly with different PG versions) * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_compat.c * *------------------------------------------------------------------------- */ pglogical-REL2_4_1/compat10/pglogical_compat.h000066400000000000000000000100771415142317000212700ustar00rootroot00000000000000#ifndef PG_LOGICAL_COMPAT_H #define PG_LOGICAL_COMPAT_H #include "pgstat.h" #include "catalog/indexing.h" #include "commands/trigger.h" #include "executor/executor.h" #include "replication/origin.h" #include "utils/varlena.h" #define PGLCreateTrigger CreateTrigger #define WaitLatchOrSocket(latch, wakeEvents, sock, timeout) \ WaitLatchOrSocket(latch, wakeEvents, sock, timeout, PG_WAIT_EXTENSION) #define WaitLatch(latch, wakeEvents, timeout) \ WaitLatch(latch, wakeEvents, timeout, PG_WAIT_EXTENSION) #define GetCurrentIntegerTimestamp() GetCurrentTimestamp() #define PGLDoCopy(stmt, queryString, processed) \ do \ { \ ParseState* pstate = make_parsestate(NULL); \ DoCopy(pstate, stmt, -1, 0, processed); \ free_parsestate(pstate); \ } while (false); #define pg_analyze_and_rewrite(parsetree, query_string, paramTypes, numParams) \ 
pg_analyze_and_rewrite(parsetree, query_string, paramTypes, numParams, NULL) #define CreateCommandTag(raw_parsetree) \ CreateCommandTag(raw_parsetree->stmt) #define PortalRun(portal, count, isTopLevel, dest, altdest, qc) \ PortalRun(portal, count, isTopLevel, true, dest, altdest, qc) #define ExecAlterExtensionStmt(stmt) \ ExecAlterExtensionStmt(NULL, stmt) #undef ExecEvalExpr #define ExecEvalExpr(expr, econtext, isNull, isDone) \ ((*(expr)->evalfunc) (expr, econtext, isNull)) #define Form_pg_sequence Form_pg_sequence_data #define InitResultRelInfo(resultRelInfo, resultRelationDesc, resultRelationIndex, instrument_options) \ InitResultRelInfo(resultRelInfo, resultRelationDesc, resultRelationIndex, NULL, instrument_options) #define ExecARUpdateTriggers(estate, relinfo, tupleid, fdw_trigtuple, newtuple, recheckIndexes) \ ExecARUpdateTriggers(estate, relinfo, tupleid, fdw_trigtuple, newtuple, recheckIndexes, NULL) #define ExecARInsertTriggers(estate, relinfo, trigtuple, recheckIndexes) \ ExecARInsertTriggers(estate, relinfo, trigtuple, recheckIndexes, NULL) #define ExecARDeleteTriggers(estate, relinfo, tupleid, fdw_trigtuple) \ ExecARDeleteTriggers(estate, relinfo, tupleid, fdw_trigtuple, NULL) #define makeDefElem(name, arg) makeDefElem(name, arg, -1) #define PGLstandard_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ standard_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, qc) #define PGLnext_ProcessUtility_hook(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ next_ProcessUtility_hook(pstmt, queryString, context, params, queryEnv, dest, qc) #define pgl_heap_attisnull(tup, attnum, tupledesc) \ heap_attisnull(tup, attnum) #ifndef rbtxn_has_catalog_changes #define rbtxn_has_catalog_changes(txn) (txn->has_catalog_changes) #endif #define IndexRelationGetNumberOfKeyAttributes(rel) RelationGetNumberOfAttributes(rel) /* deprecated in PG12, removed in PG13 */ #define 
table_open(r, l) heap_open(r, l) #define table_openrv(r, l) heap_openrv(r, l) #define table_openrv_extended(r, l, m) heap_openrv_extended(r, l, m) #define table_close(r, l) heap_close(r, l) /* 29c94e03c7 */ #define ExecStoreHeapTuple(tuple, slot, shouldFree) ExecStoreTuple(tuple, slot, InvalidBuffer, shouldFree) /* c2fe139c20 */ #define TableScanDesc HeapScanDesc #define table_beginscan(relation, snapshot, nkeys, keys) heap_beginscan(relation, snapshot, nkeys, keys) #define table_beginscan_catalog(relation, nkeys, keys) heap_beginscan_catalog(relation, nkeys, keys) #define table_endscan(scan) heap_endscan(scan) /* 578b229718e8 */ #define CreateTemplateTupleDesc(natts) \ CreateTemplateTupleDesc(natts, false) /* 2f9661311b83 */ #define CommandTag const char * #define QueryCompletion char /* 6aba63ef3e60 */ #define pg_plan_queries(querytrees, query_string, cursorOptions, boundParams) \ pg_plan_queries(querytrees, cursorOptions, boundParams) /* cd142e032ebd50ec7974b3633269477c2c72f1cc removed replorigin_drop */ inline static void replorigin_drop_by_name(char *name, bool missing_ok, bool nowait) { RepOriginId originid; originid = replorigin_by_name(name, missing_ok); if (originid != InvalidRepOriginId) replorigin_drop(originid, nowait); } #endif pglogical-REL2_4_1/compat11/000077500000000000000000000000001415142317000156075ustar00rootroot00000000000000pglogical-REL2_4_1/compat11/pglogical_compat.c000066400000000000000000000005721415142317000212630ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_compat.c * compatibility functions (mainly with different PG versions) * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_compat.c * *------------------------------------------------------------------------- */ pglogical-REL2_4_1/compat11/pglogical_compat.h000066400000000000000000000114001415142317000212600ustar00rootroot00000000000000#ifndef PG_LOGICAL_COMPAT_H #define 
PG_LOGICAL_COMPAT_H #include "replication/origin.h" #include "utils/varlena.h" #define WaitLatchOrSocket(latch, wakeEvents, sock, timeout) \ WaitLatchOrSocket(latch, wakeEvents, sock, timeout, PG_WAIT_EXTENSION) #define WaitLatch(latch, wakeEvents, timeout) \ WaitLatch(latch, wakeEvents, timeout, PG_WAIT_EXTENSION) #define GetCurrentIntegerTimestamp() GetCurrentTimestamp() #define pg_analyze_and_rewrite(parsetree, query_string, paramTypes, numParams) \ pg_analyze_and_rewrite(parsetree, query_string, paramTypes, numParams, NULL) #define CreateCommandTag(raw_parsetree) \ CreateCommandTag(raw_parsetree->stmt) #define PortalRun(portal, count, isTopLevel, dest, altdest, qc) \ PortalRun(portal, count, isTopLevel, true, dest, altdest, qc) #define ExecAlterExtensionStmt(stmt) \ ExecAlterExtensionStmt(NULL, stmt) /* * Pg 11 adds an argument here. We don't need to special-case 2ndQPostgres * anymore because it adds a separate ExecBRDeleteTriggers2 now, so this only * handles the stock Pg11 change. 
*/ #define ExecBRDeleteTriggers(estate, epqstate, relinfo, tupleid, fdw_trigtuple) \ ExecBRDeleteTriggers(estate, epqstate, relinfo, tupleid, fdw_trigtuple, NULL) #undef ExecEvalExpr #define ExecEvalExpr(expr, econtext, isNull, isDone) \ ((*(expr)->evalfunc) (expr, econtext, isNull)) #define Form_pg_sequence Form_pg_sequence_data #define InitResultRelInfo(resultRelInfo, resultRelationDesc, resultRelationIndex, instrument_options) \ InitResultRelInfo(resultRelInfo, resultRelationDesc, resultRelationIndex, NULL, instrument_options) #define ExecARUpdateTriggers(estate, relinfo, tupleid, fdw_trigtuple, newtuple, recheckIndexes) \ ExecARUpdateTriggers(estate, relinfo, tupleid, fdw_trigtuple, newtuple, recheckIndexes, NULL) #define ExecARInsertTriggers(estate, relinfo, trigtuple, recheckIndexes) \ ExecARInsertTriggers(estate, relinfo, trigtuple, recheckIndexes, NULL) #define ExecARDeleteTriggers(estate, relinfo, tupleid, fdw_trigtuple) \ ExecARDeleteTriggers(estate, relinfo, tupleid, fdw_trigtuple, NULL) #define makeDefElem(name, arg) makeDefElem(name, arg, -1) #define PGLstandard_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ standard_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, qc) #define PGLnext_ProcessUtility_hook(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ next_ProcessUtility_hook(pstmt, queryString, context, params, queryEnv, dest, qc) #define PGLCreateTrigger(stmt, queryString, relOid, refRelOid, constraintOid, indexOid, isInternal) \ CreateTrigger(stmt, queryString, relOid, refRelOid, constraintOid, indexOid, InvalidOid, InvalidOid, NULL, isInternal, false); #define PGLDoCopy(stmt, queryString, processed) \ do \ { \ ParseState* pstate = make_parsestate(NULL); \ DoCopy(pstate, stmt, -1, 0, processed); \ free_parsestate(pstate); \ } while (false); #define PGLReplicationSlotCreate(name, db_specific, persistency) ReplicationSlotCreate(name, 
db_specific, persistency) #ifndef rbtxn_has_catalog_changes #define rbtxn_has_catalog_changes(txn) (txn->has_catalog_changes) #endif /* ad7dbee368a */ #define ExecInitExtraTupleSlot(estate) \ ExecInitExtraTupleSlot(estate, NULL) #define ACL_OBJECT_RELATION OBJECT_TABLE #define ACL_OBJECT_SEQUENCE OBJECT_SEQUENCE #define DatumGetJsonb DatumGetJsonbP #define pgl_heap_attisnull(tup, attnum, tupledesc) \ heap_attisnull(tup, attnum, tupledesc) /* deprecated in PG12, removed in PG13 */ #define table_open(r, l) heap_open(r, l) #define table_openrv(r, l) heap_openrv(r, l) #define table_openrv_extended(r, l, m) heap_openrv_extended(r, l, m) #define table_close(r, l) heap_close(r, l) /* 29c94e03c7 */ #define ExecStoreHeapTuple(tuple, slot, shouldFree) ExecStoreTuple(tuple, slot, InvalidBuffer, shouldFree) /* c2fe139c20 */ #define TableScanDesc HeapScanDesc #define table_beginscan(relation, snapshot, nkeys, keys) heap_beginscan(relation, snapshot, nkeys, keys) #define table_beginscan_catalog(relation, nkeys, keys) heap_beginscan_catalog(relation, nkeys, keys) #define table_endscan(scan) heap_endscan(scan) /* 578b229718e8 */ #define CreateTemplateTupleDesc(natts) \ CreateTemplateTupleDesc(natts, false) /* 2f9661311b83 */ #define CommandTag const char * #define QueryCompletion char /* 6aba63ef3e60 */ #define pg_plan_queries(querytrees, query_string, cursorOptions, boundParams) \ pg_plan_queries(querytrees, cursorOptions, boundParams) /* cd142e032ebd50ec7974b3633269477c2c72f1cc removed replorigin_drop */ inline static void replorigin_drop_by_name(char *name, bool missing_ok, bool nowait) { RepOriginId originid; originid = replorigin_by_name(name, missing_ok); if (originid != InvalidRepOriginId) replorigin_drop(originid, nowait); } #endif 
pglogical-REL2_4_1/compat12/000077500000000000000000000000001415142317000156105ustar00rootroot00000000000000pglogical-REL2_4_1/compat12/pglogical_compat.c000066400000000000000000000005721415142317000212640ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_compat.c * compatibility functions (mainly with different PG versions) * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_compat.c * *------------------------------------------------------------------------- */ pglogical-REL2_4_1/compat12/pglogical_compat.h000066400000000000000000000101521415142317000212640ustar00rootroot00000000000000#ifndef PG_LOGICAL_COMPAT_H #define PG_LOGICAL_COMPAT_H #include "access/amapi.h" #include "access/heapam.h" #include "access/table.h" #include "access/tableam.h" #include "replication/origin.h" #include "utils/varlena.h" #define WaitLatchOrSocket(latch, wakeEvents, sock, timeout) \ WaitLatchOrSocket(latch, wakeEvents, sock, timeout, PG_WAIT_EXTENSION) #define WaitLatch(latch, wakeEvents, timeout) \ WaitLatch(latch, wakeEvents, timeout, PG_WAIT_EXTENSION) #define GetCurrentIntegerTimestamp() GetCurrentTimestamp() #define pg_analyze_and_rewrite(parsetree, query_string, paramTypes, numParams) \ pg_analyze_and_rewrite(parsetree, query_string, paramTypes, numParams, NULL) #define CreateCommandTag(raw_parsetree) \ CreateCommandTag(raw_parsetree->stmt) #define PortalRun(portal, count, isTopLevel, dest, altdest, qc) \ PortalRun(portal, count, isTopLevel, true, dest, altdest, qc) #define ExecAlterExtensionStmt(stmt) \ ExecAlterExtensionStmt(NULL, stmt) /* * Pg 11 adds an argument here. We don't need to special-case 2ndQPostgres * anymore because it adds a separate ExecBRDeleteTriggers2 now, so this only * handles the stock Pg11 change. 
*/ #define ExecBRDeleteTriggers(estate, epqstate, relinfo, tupleid, fdw_trigtuple) \ ExecBRDeleteTriggers(estate, epqstate, relinfo, tupleid, fdw_trigtuple, NULL) #undef ExecEvalExpr #define ExecEvalExpr(expr, econtext, isNull, isDone) \ ((*(expr)->evalfunc) (expr, econtext, isNull)) #define Form_pg_sequence Form_pg_sequence_data #define InitResultRelInfo(resultRelInfo, resultRelationDesc, resultRelationIndex, instrument_options) \ InitResultRelInfo(resultRelInfo, resultRelationDesc, resultRelationIndex, NULL, instrument_options) #define ExecARUpdateTriggers(estate, relinfo, tupleid, fdw_trigtuple, newslot, recheckIndexes) \ ExecARUpdateTriggers(estate, relinfo, tupleid, fdw_trigtuple, newslot, recheckIndexes, NULL) #define ExecARInsertTriggers(estate, relinfo, slot, recheckIndexes) \ ExecARInsertTriggers(estate, relinfo, slot, recheckIndexes, NULL) #define ExecARDeleteTriggers(estate, relinfo, tupleid, fdw_trigtuple) \ ExecARDeleteTriggers(estate, relinfo, tupleid, fdw_trigtuple, NULL) #define makeDefElem(name, arg) makeDefElem(name, arg, -1) #define PGLstandard_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ standard_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, qc) #define PGLnext_ProcessUtility_hook(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ next_ProcessUtility_hook(pstmt, queryString, context, params, queryEnv, dest, qc) #define PGLCreateTrigger(stmt, queryString, relOid, refRelOid, constraintOid, indexOid, isInternal) \ CreateTrigger(stmt, queryString, relOid, refRelOid, constraintOid, indexOid, InvalidOid, InvalidOid, NULL, isInternal, false); #define PGLDoCopy(stmt, queryString, processed) \ do \ { \ ParseState* pstate = make_parsestate(NULL); \ DoCopy(pstate, stmt, -1, 0, processed); \ free_parsestate(pstate); \ } while (false); #define PGLReplicationSlotCreate(name, db_specific, persistency) ReplicationSlotCreate(name, db_specific, 
persistency) #ifndef rbtxn_has_catalog_changes #define rbtxn_has_catalog_changes(txn) (txn->has_catalog_changes) #endif /* ad7dbee368a */ #define ExecInitExtraTupleSlot(estate) \ ExecInitExtraTupleSlot(estate, NULL, &TTSOpsHeapTuple) #define ACL_OBJECT_RELATION OBJECT_TABLE #define ACL_OBJECT_SEQUENCE OBJECT_SEQUENCE #define DatumGetJsonb DatumGetJsonbP #define pgl_heap_attisnull(tup, attnum, tupledesc) \ heap_attisnull(tup, attnum, tupledesc) /* 2f9661311b83 */ #define CommandTag const char * #define QueryCompletion char /* 6aba63ef3e60 */ #define pg_plan_queries(querytrees, query_string, cursorOptions, boundParams) \ pg_plan_queries(querytrees, cursorOptions, boundParams) /* cd142e032ebd50ec7974b3633269477c2c72f1cc removed replorigin_drop */ inline static void replorigin_drop_by_name(char *name, bool missing_ok, bool nowait) { RepOriginId originid; originid = replorigin_by_name(name, missing_ok); if (originid != InvalidRepOriginId) replorigin_drop(originid, nowait); } #endif pglogical-REL2_4_1/compat13/000077500000000000000000000000001415142317000156115ustar00rootroot00000000000000pglogical-REL2_4_1/compat13/pglogical_compat.c000066400000000000000000000005721415142317000212650ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_compat.c * compatibility functions (mainly with different PG versions) * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_compat.c * *------------------------------------------------------------------------- */ pglogical-REL2_4_1/compat13/pglogical_compat.h000066400000000000000000000075741415142317000213030ustar00rootroot00000000000000#ifndef PG_LOGICAL_COMPAT_H #define PG_LOGICAL_COMPAT_H #include "access/amapi.h" #include "access/heapam.h" #include "access/table.h" #include "access/tableam.h" #include "replication/origin.h" #include "utils/varlena.h" #define WaitLatchOrSocket(latch, wakeEvents, sock, timeout) \ 
WaitLatchOrSocket(latch, wakeEvents, sock, timeout, PG_WAIT_EXTENSION) #define WaitLatch(latch, wakeEvents, timeout) \ WaitLatch(latch, wakeEvents, timeout, PG_WAIT_EXTENSION) #define GetCurrentIntegerTimestamp() GetCurrentTimestamp() #define pg_analyze_and_rewrite(parsetree, query_string, paramTypes, numParams) \ pg_analyze_and_rewrite(parsetree, query_string, paramTypes, numParams, NULL) #define CreateCommandTag(raw_parsetree) \ CreateCommandTag(raw_parsetree->stmt) #define PortalRun(portal, count, isTopLevel, dest, altdest, qc) \ PortalRun(portal, count, isTopLevel, true, dest, altdest, qc) #define ExecAlterExtensionStmt(stmt) \ ExecAlterExtensionStmt(NULL, stmt) /* * Pg 11 adds an argument here. We don't need to special-case 2ndQPostgres * anymore because it adds a separate ExecBRDeleteTriggers2 now, so this only * handles the stock Pg11 change. */ #define ExecBRDeleteTriggers(estate, epqstate, relinfo, tupleid, fdw_trigtuple) \ ExecBRDeleteTriggers(estate, epqstate, relinfo, tupleid, fdw_trigtuple, NULL) #undef ExecEvalExpr #define ExecEvalExpr(expr, econtext, isNull, isDone) \ ((*(expr)->evalfunc) (expr, econtext, isNull)) #define Form_pg_sequence Form_pg_sequence_data #define InitResultRelInfo(resultRelInfo, resultRelationDesc, resultRelationIndex, instrument_options) \ InitResultRelInfo(resultRelInfo, resultRelationDesc, resultRelationIndex, NULL, instrument_options) #define ExecARUpdateTriggers(estate, relinfo, tupleid, fdw_trigtuple, newslot, recheckIndexes) \ ExecARUpdateTriggers(estate, relinfo, tupleid, fdw_trigtuple, newslot, recheckIndexes, NULL) #define ExecARInsertTriggers(estate, relinfo, slot, recheckIndexes) \ ExecARInsertTriggers(estate, relinfo, slot, recheckIndexes, NULL) #define ExecARDeleteTriggers(estate, relinfo, tupleid, fdw_trigtuple) \ ExecARDeleteTriggers(estate, relinfo, tupleid, fdw_trigtuple, NULL) #define makeDefElem(name, arg) makeDefElem(name, arg, -1) #define PGLstandard_ProcessUtility(pstmt, queryString, readOnlyTree, context, 
params, queryEnv, dest, sentToRemote, qc) \ standard_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, qc) #define PGLnext_ProcessUtility_hook(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ next_ProcessUtility_hook(pstmt, queryString, context, params, queryEnv, dest, qc) #define PGLCreateTrigger(stmt, queryString, relOid, refRelOid, constraintOid, indexOid, isInternal) \ CreateTrigger(stmt, queryString, relOid, refRelOid, constraintOid, indexOid, InvalidOid, InvalidOid, NULL, isInternal, false); #define PGLDoCopy(stmt, queryString, processed) \ do \ { \ ParseState* pstate = make_parsestate(NULL); \ DoCopy(pstate, stmt, -1, 0, processed); \ free_parsestate(pstate); \ } while (false); #define PGLReplicationSlotCreate(name, db_specific, persistency) ReplicationSlotCreate(name, db_specific, persistency) #ifndef rbtxn_has_catalog_changes #define rbtxn_has_catalog_changes(txn) (txn->has_catalog_changes) #endif /* ad7dbee368a */ #define ExecInitExtraTupleSlot(estate) \ ExecInitExtraTupleSlot(estate, NULL, &TTSOpsHeapTuple) #define ACL_OBJECT_RELATION OBJECT_TABLE #define ACL_OBJECT_SEQUENCE OBJECT_SEQUENCE #define DatumGetJsonb DatumGetJsonbP #define pgl_heap_attisnull(tup, attnum, tupledesc) \ heap_attisnull(tup, attnum, tupledesc) /* cd142e032ebd50ec7974b3633269477c2c72f1cc removed replorigin_drop */ inline static void replorigin_drop_by_name(char *name, bool missing_ok, bool nowait) { RepOriginId originid; originid = replorigin_by_name(name, missing_ok); if (originid != InvalidRepOriginId) replorigin_drop(originid, nowait); } #endif pglogical-REL2_4_1/compat14/000077500000000000000000000000001415142317000156125ustar00rootroot00000000000000pglogical-REL2_4_1/compat14/pglogical_compat.c000066400000000000000000000005721415142317000212660ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_compat.c * compatibility functions (mainly with different PG 
versions) * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_compat.c * *------------------------------------------------------------------------- */ pglogical-REL2_4_1/compat14/pglogical_compat.h000066400000000000000000000072741415142317000213010ustar00rootroot00000000000000#ifndef PG_LOGICAL_COMPAT_H #define PG_LOGICAL_COMPAT_H #include "access/amapi.h" #include "access/heapam.h" #include "access/table.h" #include "access/tableam.h" #include "utils/varlena.h" #define WaitLatchOrSocket(latch, wakeEvents, sock, timeout) \ WaitLatchOrSocket(latch, wakeEvents, sock, timeout, PG_WAIT_EXTENSION) #define WaitLatch(latch, wakeEvents, timeout) \ WaitLatch(latch, wakeEvents, timeout, PG_WAIT_EXTENSION) #define GetCurrentIntegerTimestamp() GetCurrentTimestamp() #define pg_analyze_and_rewrite(parsetree, query_string, paramTypes, numParams) \ pg_analyze_and_rewrite(parsetree, query_string, paramTypes, numParams, NULL) #define CreateCommandTag(raw_parsetree) \ CreateCommandTag(raw_parsetree->stmt) #define PortalRun(portal, count, isTopLevel, dest, altdest, qc) \ PortalRun(portal, count, isTopLevel, true, dest, altdest, qc) #define ExecAlterExtensionStmt(stmt) \ ExecAlterExtensionStmt(NULL, stmt) /* * Pg 11 adds an argument here. We don't need to special-case 2ndQPostgres * anymore because it adds a separate ExecBRDeleteTriggers2 now, so this only * handles the stock Pg11 change. 
*/ #define ExecBRDeleteTriggers(estate, epqstate, relinfo, tupleid, fdw_trigtuple) \ ExecBRDeleteTriggers(estate, epqstate, relinfo, tupleid, fdw_trigtuple, NULL) #undef ExecEvalExpr #define ExecEvalExpr(expr, econtext, isNull, isDone) \ ((*(expr)->evalfunc) (expr, econtext, isNull)) #define Form_pg_sequence Form_pg_sequence_data #define InitResultRelInfo(resultRelInfo, resultRelationDesc, resultRelationIndex, instrument_options) \ InitResultRelInfo(resultRelInfo, resultRelationDesc, resultRelationIndex, NULL, instrument_options) #define ExecARUpdateTriggers(estate, relinfo, tupleid, fdw_trigtuple, newslot, recheckIndexes) \ ExecARUpdateTriggers(estate, relinfo, tupleid, fdw_trigtuple, newslot, recheckIndexes, NULL) #define ExecARInsertTriggers(estate, relinfo, slot, recheckIndexes) \ ExecARInsertTriggers(estate, relinfo, slot, recheckIndexes, NULL) #define ExecARDeleteTriggers(estate, relinfo, tupleid, fdw_trigtuple) \ ExecARDeleteTriggers(estate, relinfo, tupleid, fdw_trigtuple, NULL) #define makeDefElem(name, arg) makeDefElem(name, arg, -1) #define PGLstandard_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ standard_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, qc) #define PGLnext_ProcessUtility_hook(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ next_ProcessUtility_hook(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, qc) #define PGLCreateTrigger(stmt, queryString, relOid, refRelOid, constraintOid, indexOid, isInternal) \ CreateTrigger(stmt, queryString, relOid, refRelOid, constraintOid, indexOid, InvalidOid, InvalidOid, NULL, isInternal, false); #define PGLDoCopy(stmt, queryString, processed) \ do \ { \ ParseState* pstate = make_parsestate(NULL); \ DoCopy(pstate, stmt, -1, 0, processed); \ free_parsestate(pstate); \ } while (false); #define PGLReplicationSlotCreate(name, db_specific, persistency) 
ReplicationSlotCreate(name, db_specific, persistency) #ifndef rbtxn_has_catalog_changes #define rbtxn_has_catalog_changes(txn) (txn->has_catalog_changes) #endif /* ad7dbee368a */ #define ExecInitExtraTupleSlot(estate) \ ExecInitExtraTupleSlot(estate, NULL, &TTSOpsHeapTuple) #define ACL_OBJECT_RELATION OBJECT_TABLE #define ACL_OBJECT_SEQUENCE OBJECT_SEQUENCE #define DatumGetJsonb DatumGetJsonbP #define pgl_heap_attisnull(tup, attnum, tupledesc) \ heap_attisnull(tup, attnum, tupledesc) /* 2a10fdc4307a667883f7a3369cb93a721ade9680 */ #define getObjectDescription(object) getObjectDescription(object, false) #endif pglogical-REL2_4_1/compat94/000077500000000000000000000000001415142317000156225ustar00rootroot00000000000000pglogical-REL2_4_1/compat94/access/000077500000000000000000000000001415142317000170635ustar00rootroot00000000000000pglogical-REL2_4_1/compat94/access/commit_ts.h000066400000000000000000000004711415142317000212340ustar00rootroot00000000000000#ifndef COMMIT_TS_H #define COMMIT_TS_H #include "access/xlog.h" #include "datatype/timestamp.h" #include "replication/origin.h" extern PGDLLIMPORT bool track_commit_timestamp; extern bool TransactionIdGetCommitTsData(TransactionId xid, TimestampTz *ts, RepOriginId *nodeid); #endif /* COMMIT_TS_H */ pglogical-REL2_4_1/compat94/access/stratnum.h000066400000000000000000000000001415142317000210770ustar00rootroot00000000000000pglogical-REL2_4_1/compat94/pglogical_compat.c000066400000000000000000000416151415142317000213010ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_compat.c * compatibility functions (mainly with different PG versions) * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_compat.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "funcapi.h" #include "miscadmin.h" #include "access/genam.h" #include "access/heapam.h" #include 
"access/htup_details.h" #include "access/xact.h" #include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/pg_database.h" #include "catalog/pg_type.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/pg_lsn.h" #include "utils/rel.h" #include "utils/snapmgr.h" #include "utils/syscache.h" #include "utils/tqual.h" #include "postmaster/bgworker_internals.h" #include "pglogical_compat.h" #include "replication/origin.h" #include "access/commit_ts.h" #define InvalidRepNodeId 0 XLogRecPtr XactLastCommitEnd = 0; RepOriginId replorigin_session_origin = InvalidRepNodeId; XLogRecPtr replorigin_session_origin_lsn = InvalidXLogRecPtr; TimestampTz replorigin_session_origin_timestamp = 0; bool track_commit_timestamp = false; #define Natts_pg_replication_origin 3 #define Anum_pg_replication_origin_roident 1 #define Anum_pg_replication_origin_roname 2 #define Anum_pg_replication_origin_roremote_lsn 3 static Oid ReplicationOriginRelationId = InvalidOid; static Oid ReplicationOriginIdentIndex = InvalidOid; static Oid ReplicationOriginNameIndex = InvalidOid; /* * Replay progress of a single remote node. */ typedef struct ReplicationState { /* * Local identifier for the remote node. */ RepOriginId roident; /* * Location of the latest commit from the remote side. */ XLogRecPtr remote_lsn; /* * Remember the local lsn of the commit record so we can XLogFlush() to it * during a checkpoint so we know the commit record actually is safe on * disk. */ XLogRecPtr local_lsn; /* * Slot is setup in backend? */ pid_t acquired_by; /* * Lock protecting remote_lsn and local_lsn. */ /* LWLock lock;*/ } ReplicationState; static ReplicationState *session_replication_state = NULL; static void session_origin_xact_cb(XactEvent event, void *arg); static void ensure_replication_origin_relid(void); /* * Create a replication origin. * * Needs to be called in a transaction. 
*/ RepOriginId replorigin_create(char *roname) { Oid roident; HeapTuple tuple = NULL; Relation rel; SnapshotData SnapshotDirty; SysScanDesc scan; ScanKeyData key; Assert(IsTransactionState()); ensure_replication_origin_relid(); /* * We need the numeric replication origin to be 16bit wide, so we cannot * rely on the normal oid allocation. Instead we simply scan * pg_replication_origin for the first unused id. That's not particularly * efficient, but this should be a fairly infrequent operation - we can * easily spend a bit more code on this when it turns out it needs to be * faster. * * We handle concurrency by taking an exclusive lock (allowing reads!) * over the table for the duration of the search. Because we use a "dirty * snapshot" we can read rows that other in-progress sessions have * written, even though they would be invisible with normal snapshots. Due * to the exclusive lock there's no danger that new rows can appear while * we're checking. */ InitDirtySnapshot(SnapshotDirty); rel = heap_open(ReplicationOriginRelationId, ExclusiveLock); for (roident = InvalidOid + 1; roident < PG_UINT16_MAX; roident++) { bool nulls[Natts_pg_replication_origin]; Datum values[Natts_pg_replication_origin]; bool collides; CHECK_FOR_INTERRUPTS(); ScanKeyInit(&key, Anum_pg_replication_origin_roident, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(roident)); scan = systable_beginscan(rel, ReplicationOriginIdentIndex, true /* indexOK */ , &SnapshotDirty, 1, &key); collides = HeapTupleIsValid(systable_getnext(scan)); systable_endscan(scan); if (!collides) { /* * Ok, found an unused roident, insert the new row and do a CCI, * so our callers can look it up if they want to. 
*/ memset(&nulls, 0, sizeof(nulls)); values[Anum_pg_replication_origin_roident - 1] = ObjectIdGetDatum(roident); values[Anum_pg_replication_origin_roname - 1] = CStringGetTextDatum(roname); values[Anum_pg_replication_origin_roremote_lsn - 1] = LSNGetDatum(InvalidXLogRecPtr); tuple = heap_form_tuple(RelationGetDescr(rel), values, nulls); simple_heap_insert(rel, tuple); CatalogUpdateIndexes(rel, tuple); CommandCounterIncrement(); break; } } /* now release lock again, */ heap_close(rel, ExclusiveLock); if (tuple == NULL) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("could not find free replication origin OID"))); heap_freetuple(tuple); return roident; } /* * Drop replication origin. * * Needs to be called in a transaction. */ void pgl_replorigin_drop(RepOriginId roident) { HeapTuple tuple = NULL; Relation rel; SnapshotData SnapshotDirty; SysScanDesc scan; ScanKeyData key; Assert(IsTransactionState()); ensure_replication_origin_relid(); InitDirtySnapshot(SnapshotDirty); rel = heap_open(ReplicationOriginRelationId, ExclusiveLock); /* Find and delete tuple from name table */ ScanKeyInit(&key, Anum_pg_replication_origin_roident, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(roident)); scan = systable_beginscan(rel, ReplicationOriginIdentIndex, true /* indexOK */, &SnapshotDirty, 1, &key); tuple = systable_getnext(scan); if (HeapTupleIsValid(tuple)) simple_heap_delete(rel, &tuple->t_self); systable_endscan(scan); CommandCounterIncrement(); /* now release lock again, */ heap_close(rel, ExclusiveLock); } void replorigin_drop_by_name(char *name, bool missing_ok, bool nowait) { RepOriginId originid; originid = replorigin_by_name(name, missing_ok); if (originid != InvalidRepOriginId) replorigin_drop(originid, nowait); } RepOriginId replorigin_by_name(char *name, bool missing_ok) { HeapTuple tuple = NULL; Relation rel; Snapshot snap; SysScanDesc scan; ScanKeyData key; Oid roident = InvalidOid; ensure_replication_origin_relid(); snap = 
RegisterSnapshot(GetLatestSnapshot()); rel = heap_open(ReplicationOriginRelationId, RowExclusiveLock); ScanKeyInit(&key, Anum_pg_replication_origin_roname, BTEqualStrategyNumber, F_TEXTEQ, CStringGetTextDatum(name)); scan = systable_beginscan(rel, ReplicationOriginNameIndex, true /* indexOK */, snap, 1, &key); tuple = systable_getnext(scan); if (HeapTupleIsValid(tuple)) { Datum values[Natts_pg_replication_origin]; bool nulls[Natts_pg_replication_origin]; heap_deform_tuple(tuple, RelationGetDescr(rel), values, nulls); roident = DatumGetObjectId(values[Anum_pg_replication_origin_roident - 1]); } else if (!missing_ok) elog(ERROR, "cache lookup failed for replication origin named %s", name); systable_endscan(scan); UnregisterSnapshot(snap); heap_close(rel, RowExclusiveLock); return roident; } void replorigin_session_setup(RepOriginId node) { Relation rel; SysScanDesc scan; ScanKeyData key; HeapTuple tuple; XLogRecPtr remote_lsn = InvalidXLogRecPtr, local_lsn = InvalidXLogRecPtr; MemoryContext oldcontext; Assert(node != InvalidRepNodeId); if (session_replication_state != NULL) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("cannot setup replication origin when one is already setup"))); ensure_replication_origin_relid(); rel = heap_open(ReplicationOriginRelationId, RowExclusiveLock); ScanKeyInit(&key, Anum_pg_replication_origin_roident, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(node)); scan = systable_beginscan(rel, ReplicationOriginIdentIndex, true, NULL, 1, &key); tuple = systable_getnext(scan); if (HeapTupleIsValid(tuple)) { Datum values[Natts_pg_replication_origin]; bool nulls[Natts_pg_replication_origin]; heap_deform_tuple(tuple, RelationGetDescr(rel), values, nulls); remote_lsn = DatumGetLSN(values[Anum_pg_replication_origin_roremote_lsn - 1]); local_lsn = XactLastCommitEnd; } systable_endscan(scan); heap_close(rel, RowExclusiveLock); oldcontext = MemoryContextSwitchTo(CacheMemoryContext); session_replication_state = 
(ReplicationState *) palloc(sizeof(ReplicationState)); session_replication_state->roident = node; session_replication_state->remote_lsn = remote_lsn; session_replication_state->local_lsn = local_lsn; MemoryContextSwitchTo(oldcontext); RegisterXactCallback(session_origin_xact_cb, NULL); } void replorigin_session_reset(void) { ReplicationState *local_replication_state = session_replication_state; if (session_replication_state == NULL) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("no replication origin is configured"))); UnregisterXactCallback(session_origin_xact_cb, NULL); session_replication_state->acquired_by = 0; session_replication_state = NULL; pfree(local_replication_state); } /* * Ask the machinery about the point up to which we successfully replayed * changes from an already setup replication origin. */ XLogRecPtr replorigin_session_get_progress(bool flush) { XLogRecPtr remote_lsn; XLogRecPtr local_lsn; Assert(session_replication_state != NULL); remote_lsn = session_replication_state->remote_lsn; local_lsn = session_replication_state->local_lsn; if (flush && local_lsn != InvalidXLogRecPtr) XLogFlush(local_lsn); return remote_lsn; } void replorigin_advance(RepOriginId node, XLogRecPtr remote_commit, XLogRecPtr local_commit, bool go_backward, bool wal_log) { HeapTuple tuple = NULL; Relation rel; SnapshotData SnapshotDirty; SysScanDesc scan; ScanKeyData key; Assert(node != InvalidRepOriginId); Assert(IsTransactionState()); if (node == DoNotReplicateId) return; ensure_replication_origin_relid(); InitDirtySnapshot(SnapshotDirty); rel = heap_open(ReplicationOriginRelationId, ExclusiveLock); /* Find and delete tuple from name table */ ScanKeyInit(&key, Anum_pg_replication_origin_roident, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(node)); scan = systable_beginscan(rel, ReplicationOriginIdentIndex, true /* indexOK */, &SnapshotDirty, 1, &key); tuple = systable_getnext(scan); if (HeapTupleIsValid(tuple)) { HeapTuple newtuple; Datum 
values[Natts_pg_replication_origin]; bool nulls[Natts_pg_replication_origin]; heap_deform_tuple(tuple, RelationGetDescr(rel), values, nulls); values[Anum_pg_replication_origin_roremote_lsn - 1] = LSNGetDatum(remote_commit); newtuple = heap_form_tuple(RelationGetDescr(rel), values, nulls); simple_heap_update(rel, &tuple->t_self, newtuple); CatalogUpdateIndexes(rel, newtuple); } systable_endscan(scan); CommandCounterIncrement(); /* now release lock again, */ heap_close(rel, ExclusiveLock); return; } static void replorigin_session_advance(XLogRecPtr remote_commit, XLogRecPtr local_commit) { Assert(session_replication_state != NULL); Assert(session_replication_state->roident != InvalidRepOriginId); if (session_replication_state->local_lsn < local_commit) session_replication_state->local_lsn = local_commit; if (session_replication_state->remote_lsn < remote_commit) session_replication_state->remote_lsn = remote_commit; replorigin_advance(session_replication_state->roident, remote_commit, local_commit, false, true); } static void session_origin_xact_cb(XactEvent event, void *arg) { if (event == XACT_EVENT_PRE_COMMIT && session_replication_state != NULL && replorigin_session_origin != InvalidRepOriginId && replorigin_session_origin != DoNotReplicateId) { replorigin_session_advance(replorigin_session_origin_lsn, XactLastCommitEnd); } } static void ensure_replication_origin_relid(void) { if (ReplicationOriginRelationId == InvalidOid) { Oid schema_oid = get_namespace_oid("pglogical_origin", true); if (schema_oid == InvalidOid) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("pglogical_origin extension not found"))); ReplicationOriginRelationId = get_relname_relid("replication_origin", schema_oid); ReplicationOriginIdentIndex = get_relname_relid("replication_origin_roident_index", schema_oid); ReplicationOriginNameIndex = get_relname_relid("replication_origin_roname_index", schema_oid); } } /* * Connect background worker to a database using OIDs. 
*/ void BackgroundWorkerInitializeConnectionByOid(Oid dboid, Oid useroid) { BackgroundWorker *worker = MyBgworkerEntry; /* XXX is this the right errcode? */ if (!(worker->bgw_flags & BGWORKER_BACKEND_DATABASE_CONNECTION)) ereport(FATAL, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("database connection requirement not indicated during registration"))); InitPostgres(NULL, dboid, NULL, NULL); /* it had better not gotten out of "init" mode yet */ if (!IsInitProcessingMode()) ereport(ERROR, (errmsg("invalid processing mode in background worker"))); SetProcessingMode(NormalProcessing); } bool TransactionIdGetCommitTsData(TransactionId xid, TimestampTz *ts, RepOriginId *nodeid) { elog(ERROR, "TransactionIdGetCommitTsData is not implemented yet"); return false; } /* * Auxiliary function to return a TEXT array out of a list of C-strings. */ ArrayType * strlist_to_textarray(List *list) { ArrayType *arr; Datum *datums; int j = 0; ListCell *cell; MemoryContext memcxt; MemoryContext oldcxt; memcxt = AllocSetContextCreate(CurrentMemoryContext, "strlist to array", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); oldcxt = MemoryContextSwitchTo(memcxt); datums = palloc(sizeof(text *) * list_length(list)); foreach(cell, list) { char *name = lfirst(cell); datums[j++] = CStringGetTextDatum(name); } MemoryContextSwitchTo(oldcxt); arr = construct_array(datums, list_length(list), TEXTOID, -1, false, 'i'); MemoryContextDelete(memcxt); return arr; } LWLockPadded * GetNamedLWLockTranche(const char *tranche_name) { LWLock *lock = LWLockAssign(); return (LWLockPadded *)lock; } void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks) { Assert(num_lwlocks == 1); RequestAddinLWLocks(num_lwlocks); } /* * CatalogTupleInsert - do heap and indexing work for a new catalog tuple * * Insert the tuple data in "tup" into the specified catalog relation. * The Oid of the inserted tuple is returned. 
* * This is a convenience routine for the common case of inserting a single * tuple in a system catalog; it inserts a new heap tuple, keeping indexes * current. Avoid using it for multiple tuples, since opening the indexes * and building the index info structures is moderately expensive. * (Use CatalogTupleInsertWithInfo in such cases.) */ Oid CatalogTupleInsert(Relation heapRel, HeapTuple tup) { CatalogIndexState indstate; Oid oid; indstate = CatalogOpenIndexes(heapRel); oid = simple_heap_insert(heapRel, tup); CatalogIndexInsert(indstate, tup); CatalogCloseIndexes(indstate); return oid; } /* * CatalogTupleUpdate - do heap and indexing work for updating a catalog tuple * * Update the tuple identified by "otid", replacing it with the data in "tup". * * This is a convenience routine for the common case of updating a single * tuple in a system catalog; it updates one heap tuple, keeping indexes * current. Avoid using it for multiple tuples, since opening the indexes * and building the index info structures is moderately expensive. * (Use CatalogTupleUpdateWithInfo in such cases.) */ void CatalogTupleUpdate(Relation heapRel, ItemPointer otid, HeapTuple tup) { CatalogIndexState indstate; indstate = CatalogOpenIndexes(heapRel); simple_heap_update(heapRel, otid, tup); CatalogIndexInsert(indstate, tup); CatalogCloseIndexes(indstate); } /* * CatalogTupleDelete - do heap and indexing work for deleting a catalog tuple * * Delete the tuple identified by "tid" in the specified catalog. * * With Postgres heaps, there is no index work to do at deletion time; * cleanup will be done later by VACUUM. However, callers of this function * shouldn't have to know that; we'd like a uniform abstraction for all * catalog tuple changes. Hence, provide this currently-trivial wrapper. * * The abstraction is a bit leaky in that we don't provide an optimized * CatalogTupleDeleteWithInfo version, because there is currently nothing to * optimize. 
If we ever need that, rather than touching a lot of call sites, * it might be better to do something about caching CatalogIndexState. */ void CatalogTupleDelete(Relation heapRel, ItemPointer tid) { simple_heap_delete(heapRel, tid); } pglogical-REL2_4_1/compat94/pglogical_compat.h000066400000000000000000000144331415142317000213040ustar00rootroot00000000000000#ifndef PG_LOGICAL_COMPAT_H #define PG_LOGICAL_COMPAT_H #include #include "access/xlog.h" #include "access/xlogdefs.h" #include "catalog/objectaddress.h" #include "catalog/pg_trigger.h" #include "commands/trigger.h" #include "nodes/pg_list.h" #include "storage/lwlock.h" #include "utils/array.h" /* 9.4 lacks PG_*_MAX */ #ifndef PG_UINT32_MAX #define PG_UINT32_MAX (0xFFFFFFFF) #endif #ifndef PG_INT32_MAX #define PG_INT32_MAX (0x7FFFFFFF) #endif #ifndef PG_INT32_MIN #define PG_INT32_MIN (-0x7FFFFFFF-1) #endif #ifndef PG_UINT16_MAX #define PG_UINT16_MAX (0xFFFF) #endif /* 9.4 lacks pg_attribute_ macros, so we clone them from c.h */ /* ---- BEGIN COPIED SECTION FROM 9.5 c.h -------- */ /* * Attribute macros * * GCC: https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html * GCC: https://gcc.gnu.org/onlinedocs/gcc/Type-Attributes.html * Sunpro: https://docs.oracle.com/cd/E18659_01/html/821-1384/gjzke.html * XLC: http://www-01.ibm.com/support/knowledgecenter/SSGH2K_11.1.0/com.ibm.xlc111.aix.doc/language_ref/function_attributes.html * XLC: http://www-01.ibm.com/support/knowledgecenter/SSGH2K_11.1.0/com.ibm.xlc111.aix.doc/language_ref/type_attrib.html */ /* only GCC supports the unused attribute */ #ifdef __GNUC__ #define pg_attribute_unused() __attribute__((unused)) #else #define pg_attribute_unused() #endif /* GCC and XLC support format attributes */ #if defined(__GNUC__) || defined(__IBMC__) #define pg_attribute_format_arg(a) __attribute__((format_arg(a))) #define pg_attribute_printf(f,a) __attribute__((format(PG_PRINTF_ATTRIBUTE, f, a))) #else #define pg_attribute_format_arg(a) #define pg_attribute_printf(f,a) 
#endif /* GCC, Sunpro and XLC support aligned, packed and noreturn */ #if defined(__GNUC__) || defined(__SUNPRO_C) || defined(__IBMC__) #define pg_attribute_aligned(a) __attribute__((aligned(a))) #define pg_attribute_noreturn() __attribute__((noreturn)) #define pg_attribute_packed() __attribute__((packed)) #define HAVE_PG_ATTRIBUTE_NORETURN 1 #else /* * NB: aligned and packed are not given default definitions because they * affect code functionality; they *must* be implemented by the compiler * if they are to be used. */ #define pg_attribute_noreturn() #endif /* * Mark a point as unreachable in a portable fashion. This should preferably * be something that the compiler understands, to aid code generation. * In assert-enabled builds, we prefer abort() for debugging reasons. */ #if defined(HAVE__BUILTIN_UNREACHABLE) && !defined(USE_ASSERT_CHECKING) #define pg_unreachable() __builtin_unreachable() #elif defined(_MSC_VER) && !defined(USE_ASSERT_CHECKING) #define pg_unreachable() __assume(0) #else #define pg_unreachable() abort() #endif /* ---- END COPIED SECTION FROM 9.5 c.h -------- */ extern PGDLLIMPORT XLogRecPtr XactLastCommitEnd; extern void BackgroundWorkerInitializeConnectionByOid(Oid dboid, Oid useroid); extern ArrayType *strlist_to_textarray(List *list); extern LWLockPadded *GetNamedLWLockTranche(const char *tranche_name); extern void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks); #define GetConfigOptionByName(name, varname, missing_ok) \ (\ AssertMacro(!missing_ok), \ GetConfigOptionByName(name, varname) \ ) /* missing macros in 9.4 */ #define ObjectAddressSubSet(addr, class_id, object_id, object_sub_id) \ do { \ (addr).classId = (class_id); \ (addr).objectId = (object_id); \ (addr).objectSubId = (object_sub_id); \ } while (0) #define ObjectAddressSet(addr, class_id, object_id) \ ObjectAddressSubSet(addr, class_id, object_id, 0) static inline ObjectAddress PGLCreateTrigger(CreateTrigStmt *stmt, const char *queryString, Oid relOid, Oid 
refRelOid, Oid constraintOid, Oid indexOid, bool isInternal) { ObjectAddress myself; myself.classId = TriggerRelationId; myself.objectId = CreateTrigger(stmt, queryString, relOid, refRelOid, constraintOid, indexOid, isInternal); myself.objectSubId = 0; return myself; } #define RawStmt Node #define PGLDoCopy(stmt, queryString, processed) DoCopy(stmt, queryString, processed) #define PGLstandard_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ standard_ProcessUtility(pstmt, queryString, context, params, dest, qc) #define PGLnext_ProcessUtility_hook(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ next_ProcessUtility_hook(pstmt, queryString, context, params, dest, qc) extern Oid CatalogTupleInsert(Relation heapRel, HeapTuple tup); extern void CatalogTupleUpdate(Relation heapRel, ItemPointer otid, HeapTuple tup); extern void CatalogTupleDelete(Relation heapRel, ItemPointer tid); /* * nowait=true is the standard behavior. If nowait=false is called, * we ignore that, meaning we don't wait even if the caller asked to * wait. This could lead to spurious errors in race conditions, but * it's the best we can do. 
*/ #define replorigin_drop(roident, nowait) pgl_replorigin_drop(roident) #define pgl_heap_attisnull(tup, attnum, tupledesc) \ heap_attisnull(tup, attnum) #define ALLOCSET_DEFAULT_SIZES \ ALLOCSET_DEFAULT_MINSIZE, \ ALLOCSET_DEFAULT_INITSIZE, \ ALLOCSET_DEFAULT_MAXSIZE #ifndef rbtxn_has_catalog_changes #define rbtxn_has_catalog_changes(txn) (txn->has_catalog_changes) #endif #define IndexRelationGetNumberOfKeyAttributes(rel) RelationGetNumberOfAttributes(rel) /* deprecated in PG12, removed in PG13 */ #define table_open(r, l) heap_open(r, l) #define table_openrv(r, l) heap_openrv(r, l) #define table_openrv_extended(r, l, m) heap_openrv_extended(r, l, m) #define table_close(r, l) heap_close(r, l) /* 29c94e03c7 */ #define ExecStoreHeapTuple(tuple, slot, shouldFree) ExecStoreTuple(tuple, slot, InvalidBuffer, shouldFree) /* c2fe139c20 */ #define TableScanDesc HeapScanDesc #define table_beginscan(relation, snapshot, nkeys, keys) heap_beginscan(relation, snapshot, nkeys, keys) #define table_beginscan_catalog(relation, nkeys, keys) heap_beginscan_catalog(relation, nkeys, keys) #define table_endscan(scan) heap_endscan(scan) /* 578b229718e8 */ #define CreateTemplateTupleDesc(natts) \ CreateTemplateTupleDesc(natts, false) /* 2f9661311b83 */ #define CommandTag const char * #define QueryCompletion char /* 6aba63ef3e60 */ #define pg_plan_queries(querytrees, query_string, cursorOptions, boundParams) \ pg_plan_queries(querytrees, cursorOptions, boundParams) #endif pglogical-REL2_4_1/compat94/pglogical_origin--1.0.0.sql000066400000000000000000000011441415142317000223620ustar00rootroot00000000000000\echo Use "CREATE EXTENSION pglogical_origin" to load this file. 
\quit DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') != 904 THEN RAISE EXCEPTION 'pglogical_origin can only be installed into PostgreSQL 9.4'; END IF; END;$$; CREATE TABLE pglogical_origin.replication_origin ( roident oid NOT NULL, roname text NOT NULL, roremote_lsn pg_lsn NOT NULL ); CREATE UNIQUE INDEX replication_origin_roident_index ON pglogical_origin.replication_origin(roident); CREATE UNIQUE INDEX replication_origin_roname_index ON pglogical_origin.replication_origin(roname); pglogical-REL2_4_1/compat94/pglogical_origin.control000066400000000000000000000003301415142317000225300ustar00rootroot00000000000000# pglogical_origin extension comment = 'PostgreSQL Logical Replication Origin Tracking Emulation for 9.4' default_version = '1.0.0' module_pathname = '$libdir/pglogical' relocatable = false schema = pglogical_origin pglogical-REL2_4_1/compat94/replication/000077500000000000000000000000001415142317000201335ustar00rootroot00000000000000pglogical-REL2_4_1/compat94/replication/origin.h000066400000000000000000000017361415142317000216020ustar00rootroot00000000000000#ifndef PGLOGICAL_COMPAT_REPLICATION_ORIGIN_H #define PGLOGICAL_COMPAT_REPLICATION_ORIGIN_H #ifndef InvalidRepOriginId typedef uint16 RepOriginId; #define InvalidRepOriginId 0 #define DoNotReplicateId PG_UINT16_MAX #endif extern PGDLLIMPORT RepOriginId replorigin_session_origin; extern PGDLLIMPORT XLogRecPtr replorigin_session_origin_lsn; extern PGDLLIMPORT TimestampTz replorigin_session_origin_timestamp; extern RepOriginId replorigin_create(char *name); extern void pgl_replorigin_drop(RepOriginId roident); extern void replorigin_drop_by_name(char *name, bool missing_ok, bool nowait); extern RepOriginId replorigin_by_name(char *name, bool missing_ok); extern void replorigin_session_setup(RepOriginId node); extern void replorigin_session_reset(void); extern XLogRecPtr replorigin_session_get_progress(bool flush); extern void replorigin_advance(RepOriginId 
node, XLogRecPtr remote_commit, XLogRecPtr local_commit, bool go_backward, bool wal_log); #endif pglogical-REL2_4_1/compat95/000077500000000000000000000000001415142317000156235ustar00rootroot00000000000000pglogical-REL2_4_1/compat95/pglogical_compat.c000066400000000000000000000061151415142317000212760ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_compat.c * compatibility functions (mainly with different PG versions) * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_compat.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "pglogical_compat.h" LWLockPadded * GetNamedLWLockTranche(const char *tranche_name) { LWLock *lock = LWLockAssign(); return (LWLockPadded *)lock; } void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks) { Assert(num_lwlocks == 1); RequestAddinLWLocks(num_lwlocks); } /* * CatalogTupleInsert - do heap and indexing work for a new catalog tuple * * Insert the tuple data in "tup" into the specified catalog relation. * The Oid of the inserted tuple is returned. * * This is a convenience routine for the common case of inserting a single * tuple in a system catalog; it inserts a new heap tuple, keeping indexes * current. Avoid using it for multiple tuples, since opening the indexes * and building the index info structures is moderately expensive. * (Use CatalogTupleInsertWithInfo in such cases.) */ Oid CatalogTupleInsert(Relation heapRel, HeapTuple tup) { CatalogIndexState indstate; Oid oid; indstate = CatalogOpenIndexes(heapRel); oid = simple_heap_insert(heapRel, tup); CatalogIndexInsert(indstate, tup); CatalogCloseIndexes(indstate); return oid; } /* * CatalogTupleUpdate - do heap and indexing work for updating a catalog tuple * * Update the tuple identified by "otid", replacing it with the data in "tup". 
* * This is a convenience routine for the common case of updating a single * tuple in a system catalog; it updates one heap tuple, keeping indexes * current. Avoid using it for multiple tuples, since opening the indexes * and building the index info structures is moderately expensive. * (Use CatalogTupleUpdateWithInfo in such cases.) */ void CatalogTupleUpdate(Relation heapRel, ItemPointer otid, HeapTuple tup) { CatalogIndexState indstate; indstate = CatalogOpenIndexes(heapRel); simple_heap_update(heapRel, otid, tup); CatalogIndexInsert(indstate, tup); CatalogCloseIndexes(indstate); } /* * CatalogTupleDelete - do heap and indexing work for deleting a catalog tuple * * Delete the tuple identified by "tid" in the specified catalog. * * With Postgres heaps, there is no index work to do at deletion time; * cleanup will be done later by VACUUM. However, callers of this function * shouldn't have to know that; we'd like a uniform abstraction for all * catalog tuple changes. Hence, provide this currently-trivial wrapper. * * The abstraction is a bit leaky in that we don't provide an optimized * CatalogTupleDeleteWithInfo version, because there is currently nothing to * optimize. If we ever need that, rather than touching a lot of call sites, * it might be better to do something about caching CatalogIndexState. 
*/ void CatalogTupleDelete(Relation heapRel, ItemPointer tid) { simple_heap_delete(heapRel, tid); } pglogical-REL2_4_1/compat95/pglogical_compat.h000066400000000000000000000072541415142317000213100ustar00rootroot00000000000000#ifndef PG_LOGICAL_COMPAT_H #define PG_LOGICAL_COMPAT_H #include "pgstat.h" #include "catalog/indexing.h" #include "commands/trigger.h" #include "executor/executor.h" #include "replication/origin.h" #include "storage/lwlock.h" extern LWLockPadded *GetNamedLWLockTranche(const char *tranche_name); extern void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks); #define GetConfigOptionByName(name, varname, missing_ok) \ (\ AssertMacro(!missing_ok), \ GetConfigOptionByName(name, varname) \ ) #define PGLCreateTrigger CreateTrigger #define RawStmt Node #define PGLDoCopy(stmt, queryString, processed) DoCopy(stmt, queryString, processed) #ifdef PGXC #define PGLstandard_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ standard_ProcessUtility(pstmt, queryString, context, params, dest, sentToRemote, qc) #define PGLnext_ProcessUtility_hook(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ next_ProcessUtility_hook(pstmt, queryString, context, params, dest, sentToRemote, qc) #else #define PGLstandard_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ standard_ProcessUtility(pstmt, queryString, context, params, dest, qc) #define PGLnext_ProcessUtility_hook(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ next_ProcessUtility_hook(pstmt, queryString, context, params, dest, qc) #endif extern Oid CatalogTupleInsert(Relation heapRel, HeapTuple tup); extern void CatalogTupleUpdate(Relation heapRel, ItemPointer otid, HeapTuple tup); extern void CatalogTupleDelete(Relation heapRel, ItemPointer tid); /* * nowait=true is the standard behavior. 
If nowait=false is called, * we ignore that, meaning we don't wait even if the caller asked to * wait. This could lead to spurious errors in race conditions, but * it's the best we can do. */ #define replorigin_drop(roident, nowait) replorigin_drop(roident) #define pgl_heap_attisnull(tup, attnum, tupledesc) \ heap_attisnull(tup, attnum) #define ALLOCSET_DEFAULT_SIZES \ ALLOCSET_DEFAULT_MINSIZE, \ ALLOCSET_DEFAULT_INITSIZE, \ ALLOCSET_DEFAULT_MAXSIZE #ifndef rbtxn_has_catalog_changes #define rbtxn_has_catalog_changes(txn) (txn->has_catalog_changes) #endif #define IndexRelationGetNumberOfKeyAttributes(rel) RelationGetNumberOfAttributes(rel) /* deprecated in PG12, removed in PG13 */ #define table_open(r, l) heap_open(r, l) #define table_openrv(r, l) heap_openrv(r, l) #define table_openrv_extended(r, l, m) heap_openrv_extended(r, l, m) #define table_close(r, l) heap_close(r, l) /* 29c94e03c7 */ #define ExecStoreHeapTuple(tuple, slot, shouldFree) ExecStoreTuple(tuple, slot, InvalidBuffer, shouldFree) /* c2fe139c20 */ #define TableScanDesc HeapScanDesc #define table_beginscan(relation, snapshot, nkeys, keys) heap_beginscan(relation, snapshot, nkeys, keys) #define table_beginscan_catalog(relation, nkeys, keys) heap_beginscan_catalog(relation, nkeys, keys) #define table_endscan(scan) heap_endscan(scan) /* 578b229718e8 */ #define CreateTemplateTupleDesc(natts) \ CreateTemplateTupleDesc(natts, false) /* 2f9661311b83 */ #define CommandTag const char * #define QueryCompletion char /* 6aba63ef3e60 */ #define pg_plan_queries(querytrees, query_string, cursorOptions, boundParams) \ pg_plan_queries(querytrees, cursorOptions, boundParams) /* cd142e032ebd50ec7974b3633269477c2c72f1cc removed replorigin_drop */ inline static void replorigin_drop_by_name(char *name, bool missing_ok, bool nowait) { RepOriginId originid; originid = replorigin_by_name(name, missing_ok); if (originid != InvalidRepOriginId) replorigin_drop(originid, nowait); } #endif 
pglogical-REL2_4_1/compat96/000077500000000000000000000000001415142317000156245ustar00rootroot00000000000000pglogical-REL2_4_1/compat96/pglogical_compat.c000066400000000000000000000054711415142317000213030ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_compat.c * compatibility functions (mainly with different PG versions) * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_compat.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "pglogical_compat.h" /* * CatalogTupleInsert - do heap and indexing work for a new catalog tuple * * Insert the tuple data in "tup" into the specified catalog relation. * The Oid of the inserted tuple is returned. * * This is a convenience routine for the common case of inserting a single * tuple in a system catalog; it inserts a new heap tuple, keeping indexes * current. Avoid using it for multiple tuples, since opening the indexes * and building the index info structures is moderately expensive. * (Use CatalogTupleInsertWithInfo in such cases.) */ Oid CatalogTupleInsert(Relation heapRel, HeapTuple tup) { CatalogIndexState indstate; Oid oid; indstate = CatalogOpenIndexes(heapRel); oid = simple_heap_insert(heapRel, tup); CatalogIndexInsert(indstate, tup); CatalogCloseIndexes(indstate); return oid; } /* * CatalogTupleUpdate - do heap and indexing work for updating a catalog tuple * * Update the tuple identified by "otid", replacing it with the data in "tup". * * This is a convenience routine for the common case of updating a single * tuple in a system catalog; it updates one heap tuple, keeping indexes * current. Avoid using it for multiple tuples, since opening the indexes * and building the index info structures is moderately expensive. * (Use CatalogTupleUpdateWithInfo in such cases.) 
*/ void CatalogTupleUpdate(Relation heapRel, ItemPointer otid, HeapTuple tup) { CatalogIndexState indstate; indstate = CatalogOpenIndexes(heapRel); simple_heap_update(heapRel, otid, tup); CatalogIndexInsert(indstate, tup); CatalogCloseIndexes(indstate); } /* * CatalogTupleDelete - do heap and indexing work for deleting a catalog tuple * * Delete the tuple identified by "tid" in the specified catalog. * * With Postgres heaps, there is no index work to do at deletion time; * cleanup will be done later by VACUUM. However, callers of this function * shouldn't have to know that; we'd like a uniform abstraction for all * catalog tuple changes. Hence, provide this currently-trivial wrapper. * * The abstraction is a bit leaky in that we don't provide an optimized * CatalogTupleDeleteWithInfo version, because there is currently nothing to * optimize. If we ever need that, rather than touching a lot of call sites, * it might be better to do something about caching CatalogIndexState. */ void CatalogTupleDelete(Relation heapRel, ItemPointer tid) { simple_heap_delete(heapRel, tid); } pglogical-REL2_4_1/compat96/pglogical_compat.h000066400000000000000000000063661415142317000213140ustar00rootroot00000000000000#ifndef PG_LOGICAL_COMPAT_H #define PG_LOGICAL_COMPAT_H #include "pgstat.h" #include "catalog/indexing.h" #include "commands/trigger.h" #include "executor/executor.h" #include "replication/origin.h" #define PGLCreateTrigger CreateTrigger #define RawStmt Node #define PGLDoCopy(stmt, queryString, processed) DoCopy(stmt, queryString, processed) #ifdef PGXC #define PGLstandard_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ standard_ProcessUtility(pstmt, queryString, context, params, dest, sentToRemote, qc) #define PGLnext_ProcessUtility_hook(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ next_ProcessUtility_hook(pstmt, queryString, context, params, dest, sentToRemote, qc) #else #define 
PGLstandard_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ standard_ProcessUtility(pstmt, queryString, context, params, dest, qc) #define PGLnext_ProcessUtility_hook(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc) \ next_ProcessUtility_hook(pstmt, queryString, context, params, dest, qc) #endif extern Oid CatalogTupleInsert(Relation heapRel, HeapTuple tup); extern void CatalogTupleUpdate(Relation heapRel, ItemPointer otid, HeapTuple tup); extern void CatalogTupleDelete(Relation heapRel, ItemPointer tid); /* * nowait=true is the standard behavior. If nowait=false is called, * we ignore that, meaning we don't wait even if the caller asked to * wait. This could lead to spurious errors in race conditions, but * it's the best we can do. */ #define replorigin_drop(roident, nowait) replorigin_drop(roident) #define pgl_heap_attisnull(tup, attnum, tupledesc) \ heap_attisnull(tup, attnum) #ifndef rbtxn_has_catalog_changes #define rbtxn_has_catalog_changes(txn) (txn->has_catalog_changes) #endif #define IndexRelationGetNumberOfKeyAttributes(rel) RelationGetNumberOfAttributes(rel) /* deprecated in PG12, removed in PG13 */ #define table_open(r, l) heap_open(r, l) #define table_openrv(r, l) heap_openrv(r, l) #define table_openrv_extended(r, l, m) heap_openrv_extended(r, l, m) #define table_close(r, l) heap_close(r, l) /* 29c94e03c7 */ #define ExecStoreHeapTuple(tuple, slot, shouldFree) ExecStoreTuple(tuple, slot, InvalidBuffer, shouldFree) /* c2fe139c20 */ #define TableScanDesc HeapScanDesc #define table_beginscan(relation, snapshot, nkeys, keys) heap_beginscan(relation, snapshot, nkeys, keys) #define table_beginscan_catalog(relation, nkeys, keys) heap_beginscan_catalog(relation, nkeys, keys) #define table_endscan(scan) heap_endscan(scan) /* 578b229718e8 */ #define CreateTemplateTupleDesc(natts) \ CreateTemplateTupleDesc(natts, false) /* 2f9661311b83 */ #define CommandTag const char * 
#define QueryCompletion char /* 6aba63ef3e60 */ #define pg_plan_queries(querytrees, query_string, cursorOptions, boundParams) \ pg_plan_queries(querytrees, cursorOptions, boundParams) /* cd142e032ebd50ec7974b3633269477c2c72f1cc removed replorigin_drop */ inline static void replorigin_drop_by_name(char *name, bool missing_ok, bool nowait) { RepOriginId originid; originid = replorigin_by_name(name, missing_ok); if (originid != InvalidRepOriginId) replorigin_drop(originid, nowait); } #endif pglogical-REL2_4_1/docs/000077500000000000000000000000001415142317000151125ustar00rootroot00000000000000pglogical-REL2_4_1/docs/README.md000066400000000000000000001234401415142317000163750ustar00rootroot00000000000000# pglogical 2 The pglogical 2 extension provides logical streaming replication for PostgreSQL, using a publish/subscribe model. It is based on technology developed as part of the BDR project (http://2ndquadrant.com/BDR). We use the following terms to describe data streams between nodes, deliberately reused from the earlier Slony technology: * Nodes - PostgreSQL database instances * Providers and Subscribers - roles taken by Nodes * Replication Set - a collection of tables pglogical is utilising the latest in-core features, so we have these version restrictions: * Provider & subscriber nodes must run PostgreSQL 9.4+ * PostgreSQL 9.5+ is required for replication origin filtering and conflict detection * Additionally, subscriber can be Postgres-XL 9.5+ Use cases supported are: * Upgrades between major versions (given the above restrictions) * Full database replication * Selective replication of sets of tables using replication sets * Selective replication of table rows at either publisher or subscriber side (row_filter) * Selective replication of table columns at publisher side * Data gather/merge from multiple upstream servers Architectural details: * pglogical works on a per-database level, not whole server level like physical streaming replication * One Provider may 
feed multiple Subscribers without incurring additional disk write overhead * One Subscriber can merge changes from several origins and detect conflict between changes with automatic and configurable conflict resolution (some, but not all aspects required for multi-master). * Cascading replication is implemented in the form of changeset forwarding. ## Requirements To use pglogical the provider and subscriber must be running PostgreSQL 9.4 or newer. The `pglogical` extension must be installed on both provider and subscriber. You must `CREATE EXTENSION pglogical` on both. Tables on the provider and subscriber must have the same names and be in the same schema. Future revisions may add mapping features. Tables on the provider and subscriber must have the same columns, with the same data types in each column. `CHECK` constraints, `NOT NULL` constraints, etc., must be the same or weaker (more permissive) on the subscriber than the provider. Tables must have the same `PRIMARY KEY`s. It is not recommended to add additional `UNIQUE` constraints other than the `PRIMARY KEY` (see below). Some additional requirements are covered in "Limitations and Restrictions", below. ## Installation ### Packages pglogical is available as RPMs via yum for Fedora, CentOS, & RHEL, and as DEBs via apt for Debian and Ubuntu, or as source code here. Please see below for instructions on installing from source. #### Installing pglogical with YUM The instructions below are valid for Red Hat family of operating systems (RHEL, CentOS, Fedora). Pre-Requisites ##### Pre-requisites These RPMs all require the PGDG PostgreSQL releases from http://yum.postgresql.org/. You cannot use them with stock PostgreSQL releases included in Fedora and RHEL. 
If you don’t have PostgreSQL already: - Install the appropriate PGDG repo rpm from http://yum.postgresql.org/repopackages.php - Install PostgreSQL - PostgreSQL 9.4: `yum install postgresql94-server postgresql94-contrib` - PostgreSQL 9.5: `yum install postgresql95-server postgresql95-contrib` - PostgreSQL 9.6: `yum install postgresql96-server postgresql96-contrib` - PostgreSQL 10: `yum install postgresql10-server postgresql10-contrib` - PostgreSQL 11: `yum install postgresql11-server postgresql11-contrib` - PostgreSQL 12: `yum install postgresql12-server postgresql12-contrib` - PostgreSQL 13: `yum install postgresql13-server postgresql13-contrib` - PostgreSQL 14: `yum install postgresql14-server postgresql14-contrib` Then install the “2ndQuadrant’s General Public” repository for your PostgreSQL version, by running the following instructions as root on the destination Linux server: - PostgreSQL 9.4: `curl https://techsupport.enterprisedb.com/api/repository/dl/default/release/9.4/rpm | bash` - PostgreSQL 9.5: `curl https://techsupport.enterprisedb.com/api/repository/dl/default/release/9.5/rpm | bash` - PostgreSQL 9.6: `curl https://techsupport.enterprisedb.com/api/repository/dl/default/release/9.6/rpm | bash` - PostgreSQL 10: `curl https://techsupport.enterprisedb.com/api/repository/dl/default/release/10/rpm | bash` - PostgreSQL 11: `curl https://techsupport.enterprisedb.com/api/repository/dl/default/release/11/rpm | bash` - PostgreSQL 12: `curl https://techsupport.enterprisedb.com/api/repository/dl/default/release/12/rpm | bash` - PostgreSQL 13: `curl https://techsupport.enterprisedb.com/api/repository/dl/default/release/13/rpm | bash` - PostgreSQL 14: `curl https://techsupport.enterprisedb.com/api/repository/dl/default/release/14/rpm | bash` ##### Installation Once the repository is installed, you can proceed to pglogical for your PostgreSQL version: - PostgreSQL 9.4: `yum install postgresql94-pglogical` - PostgreSQL 9.5: `yum install postgresql95-pglogical` - 
PostgreSQL 9.6: `yum install postgresql96-pglogical` - PostgreSQL 10: `yum install postgresql10-pglogical` - PostgreSQL 11: `yum install postgresql11-pglogical` - PostgreSQL 12: `yum install postgresql12-pglogical` - PostgreSQL 13: `yum install postgresql13-pglogical` - PostgreSQL 14: `yum install postgresql14-pglogical` You may be prompted to accept the repository GPG key for package signing: Retrieving key from file:///etc/pki/rpm-gpg/RPM-GPG-KEY-2NDQ-DL-DEFAULT Importing GPG key 0xD6BAF0C3: Userid : "Public repository signing key 2ndQuadrant " Fingerprint: 8565 305c ea7d 0b66 4933 d250 9904 cd4b d6ba f0c3 From : /etc/pki/rpm-gpg/RPM-GPG-KEY-2NDQ-DL-DEFAULT Is this ok [y/N]: If so, accept the key (if it matches the above) by pressing ‘y’ then enter. (It’s signed by the 2ndQuadrant master packaging key, if you want to verify that.) #### Installing pglogical with APT The instructions below are valid for Debian and all Linux flavors based on Debian (e.g. Ubuntu). ##### Pre-requisites You can install the “2ndQuadrant’s General Public” repository by running the following instructions as root on the destination Linux server: `curl https://techsupport.enterprisedb.com/api/repository/dl/default/release/deb | bash` - Add the http://apt.postgresql.org/ repository. See the site for instructions. 
##### Installation Once pre-requisites are complete, installing pglogical is simply a matter of executing the following for your version of PostgreSQL: - PostgreSQL 9.4: `sudo apt-get install postgresql-9.4-pglogical` - PostgreSQL 9.5: `sudo apt-get install postgresql-9.5-pglogical` - PostgreSQL 9.6: `sudo apt-get install postgresql-9.6-pglogical` - PostgreSQL 10: `sudo apt-get install postgresql-10-pglogical` - PostgreSQL 11: `sudo apt-get install postgresql-11-pglogical` - PostgreSQL 12: `sudo apt-get install postgresql-12-pglogical` - PostgreSQL 13: `sudo apt-get install postgresql-13-pglogical` - PostgreSQL 14: `sudo apt-get install postgresql-14-pglogical` ### From source code Source code installs are the same as for any other PostgreSQL extension built using PGXS. Make sure the directory containing `pg_config` from the PostgreSQL release is listed in your `PATH` environment variable. You might have to install a `-dev` or `-devel` package for your PostgreSQL release from your package manager if you don't have `pg_config`. Then run `make` to compile, and `make install` to install. You might need to use `sudo` for the install step. e.g. for a typical Fedora or RHEL 7 install, assuming you're using the [yum.postgresql.org](http://yum.postgresql.org) packages for PostgreSQL: sudo dnf install postgresql95-devel PATH=/usr/pgsql-9.5/bin:$PATH make clean all sudo PATH=/usr/pgsql-9.5/bin:$PATH make install ## Usage This section describes basic usage of the pglogical replication extension. 
### Quick setup First the PostgreSQL server has to be properly configured to support logical decoding: wal_level = 'logical' max_worker_processes = 10 # one per database needed on provider node # one per node needed on subscriber node max_replication_slots = 10 # one per node needed on provider node max_wal_senders = 10 # one per node needed on provider node shared_preload_libraries = 'pglogical' If you are using PostgreSQL 9.5+ (this won't work on 9.4) and want to handle conflict resolution with last/first update wins (see [Conflicts](#conflicts)), you can add this additional option to postgresql.conf: track_commit_timestamp = on # needed for last/first update wins conflict resolution # property available in PostgreSQL 9.5+ `pg_hba.conf` has to allow logical replication connections from localhost. Up until PostgreSQL 9.6, logical replication connections are managed using the `replication` keyword in `pg_hba.conf`. In PostgreSQL 10 and later, logical replication connections are treated by `pg_hba.conf` as regular connections to the provider database. Next the `pglogical` extension has to be installed on all nodes: CREATE EXTENSION pglogical; If using PostgreSQL 9.4, then the `pglogical_origin` extension also has to be installed on that node: CREATE EXTENSION pglogical_origin; Now create the provider node: SELECT pglogical.create_node( node_name := 'provider1', dsn := 'host=providerhost port=5432 dbname=db' ); Add all tables in `public` schema to the `default` replication set. SELECT pglogical.replication_set_add_all_tables('default', ARRAY['public']); Optionally you can also create additional replication sets and add tables to them (see [Replication sets](#replication-sets)). It's usually better to create replication sets before subscribing so that all tables are synchronized during initial replication setup in a single initial transaction. However, users of bigger databases may instead wish to create them incrementally for better control. 
Once the provider node is setup, subscribers can be subscribed to it. First the subscriber node must be created: SELECT pglogical.create_node( node_name := 'subscriber1', dsn := 'host=thishost port=5432 dbname=db' ); And finally on the subscriber node you can create the subscription which will start synchronization and replication process in the background: SELECT pglogical.create_subscription( subscription_name := 'subscription1', provider_dsn := 'host=providerhost port=5432 dbname=db' ); SELECT pglogical.wait_for_subscription_sync_complete('subscription1'); ### Creating subscriber nodes with base backups In addition to the SQL-level node and subscription creation, pglogical also supports creating a subscriber by cloning the provider with `pg_basebackup` and starting it up as a pglogical subscriber. This is done with the `pglogical_create_subscriber` tool; see the `--help` output. Unlike `pglogical.create_subscription`'s data sync options, this clone ignores replication sets and copies all tables on all databases. However, it's often much faster, especially over high-bandwidth links. ### Node management Nodes can be added and removed dynamically using the SQL interfaces. - `pglogical.create_node(node_name name, dsn text)` Creates a node. Parameters: - `node_name` - name of the new node, only one node is allowed per database - `dsn` - connection string to the node, for nodes that are supposed to be providers, this should be reachable from outside - `pglogical.drop_node(node_name name, ifexists bool)` Drops the pglogical node. Parameters: - `node_name` - name of an existing node - `ifexists` - if true, error is not thrown when subscription does not exist, default is false - `pglogical.alter_node_add_interface(node_name name, interface_name name, dsn text)` Adds additional interface to a node. When node is created, the interface for it is also created with the `dsn` specified in the `create_node` and with the same name as the node. 
This interface allows adding alternative interfaces with different connection strings to an existing node. Parameters: - `node_name` - name of an existing node - `interface_name` - name of a new interface to be added - `dsn` - connection string to the node used for the new interface - `pglogical.alter_node_drop_interface(node_name name, interface_name name)` Remove existing interface from a node. Parameters: - `node_name` - name of and existing node - `interface_name` - name of an existing interface ### Subscription management - `pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[], synchronize_structure boolean, synchronize_data boolean, forward_origins text[], apply_delay interval)` Creates a subscription from current node to the provider node. Command does not block, just initiates the action. Parameters: - `subscription_name` - name of the subscription, must be unique - `provider_dsn` - connection string to a provider - `replication_sets` - array of replication sets to subscribe to, these must already exist, default is "{default,default_insert_only,ddl_sql}" - `synchronize_structure` - specifies if to synchronize structure from provider to the subscriber, default false - `synchronize_data` - specifies if to synchronize data from provider to the subscriber, default true - `forward_origins` - array of origin names to forward, currently only supported values are empty array meaning don't forward any changes that didn't originate on provider node (this is useful for two-way replication between the nodes), or "{all}" which means replicate all changes no matter what is their origin, default is "{all}" - `apply_delay` - how much to delay replication, default is 0 seconds - `force_text_transfer` - force the provider to replicate all columns using a text representation (which is slower, but may be used to change the type of a replicated column on the subscriber), default is false The `subscription_name` is used as 
`application_name` by the replication connection. This means that it's visible in the `pg_stat_replication` monitoring view. It can also be used in `synchronous_standby_names` when pglogical is used as part of [synchronous replication](#synchronous-replication) setup. Use `pglogical.wait_for_subscription_sync_complete(sub_name)` to wait for the subscription to asynchronously start replicating and complete any needed schema and/or data sync. - `pglogical.drop_subscription(subscription_name name, ifexists bool)` Disconnects the subscription and removes it from the catalog. Parameters: - `subscription_name` - name of the existing subscription - `ifexists` - if true, error is not thrown when subscription does not exist, default is false - `pglogical.alter_subscription_disable(subscription_name name, immediate bool)` Disables a subscription and disconnects it from the provider. Parameters: - `subscription_name` - name of the existing subscription - `immediate` - if true, the subscription is stopped immediately, otherwise it will be only stopped at the end of current transaction, default is false - `pglogical.alter_subscription_enable(subscription_name name, immediate bool)` Enables disabled subscription. Parameters: - `subscription_name` - name of the existing subscription - `immediate` - if true, the subscription is started immediately, otherwise it will be only started at the end of current transaction, default is false - `pglogical.alter_subscription_interface(subscription_name name, interface_name name)` Switch the subscription to use different interface to connect to provider node. Parameters: - `subscription_name` - name of an existing subscription - `interface_name` - name of an existing interface of the current provider node - `pglogical.alter_subscription_synchronize(subscription_name name, truncate bool)` All unsynchronized tables in all sets are synchronized in a single operation. Tables are copied and synchronized one by one. 
Command does not block, just initiates the action. Use `pglogical.wait_for_subscription_sync_complete` to wait for completion. Parameters: - `subscription_name` - name of the existing subscription - `truncate` - if true, tables will be truncated before copy, default false - `pglogical.alter_subscription_resynchronize_table(subscription_name name, relation regclass)` Resynchronize one existing table. The table may not be the target of any foreign key constraints. **WARNING: This function will truncate the table immediately, and only then begin synchronising it, so it will be empty while being synced** Does not block, use `pglogical.wait_for_table_sync_complete` to wait for completion. Parameters: - `subscription_name` - name of the existing subscription - `relation` - name of existing table, optionally qualified - `pglogical.wait_for_subscription_sync_complete(subscription_name name)` Wait for a subscription or to finish synchronization after a `pglogical.create_subscription` or `pglogical.alter_subscription_synchronize`. This function waits until the subscription's initial schema/data sync, if any, are done, and until any tables pending individual resynchronisation have also finished synchronising. For best results, run `SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL)` on the provider after any replication set changes that requested resyncs, and only then call `pglogical.wait_for_subscription_sync_complete` on the subscriber. - `pglogical.wait_for_table_sync_complete(subscription_name name, relation regclass)` Same as `pglogical.wait_for_subscription_sync_complete`, but waits only for the subscription's initial sync and the named table. Other tables pending resynchronisation are ignored. - `pglogical.wait_slot_confirm_lsn` `SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL)` Wait until all replication slots on the current node have replayed up to the xlog insert position at time of call on all providers. 
Returns when all slots' `confirmed_flush_lsn` passes the `pg_current_wal_insert_lsn()` at time of call. Optionally may wait for only one replication slot (first argument). Optionally may wait for an arbitrary LSN passed instead of the insert lsn (second argument). Both are usually just left null. This function is very useful to ensure all subscribers have received changes up to a certain point on the provider. - `pglogical.show_subscription_status(subscription_name name)` Shows status and basic information about subscription. Parameters: - `subscription_name` - optional name of the existing subscription, when no name was provided, the function will show status for all subscriptions on local node - `pglogical.show_subscription_table(subscription_name name, relation regclass)` Shows synchronization status of a table. Parameters: - `subscription_name` - name of the existing subscription - `relation` - name of existing table, optionally qualified - `pglogical.alter_subscription_add_replication_set(subscription_name name, replication_set name)` Adds one replication set into a subscriber. Does not synchronize, only activates consumption of events. Parameters: - `subscription_name` - name of the existing subscription - `replication_set` - name of replication set to add - `pglogical.alter_subscription_remove_replication_set(subscription_name name, replication_set name)` Removes one replication set from a subscriber. Parameters: - `subscription_name` - name of the existing subscription - `replication_set` - name of replication set to remove There is also a `postgresql.conf` parameter, `pglogical.extra_connection_options`, that may be set to assign connection options that apply to all connections made by pglogical. This can be a useful place to set up custom keepalive options, etc. pglogical defaults to enabling TCP keepalives to ensure that it notices when the upstream server disappears unexpectedly. 
To disable them add `keepalives = 0` to `pglogical.extra_connection_options`. ### Replication sets Replication sets provide a mechanism to control which tables in the database will be replicated and which actions on those tables will be replicated. Each replicated set can specify individually if `INSERTs`, `UPDATEs`, `DELETEs` and `TRUNCATEs` on the set are replicated. Every table can be in multiple replication sets and every subscriber can subscribe to multiple replication sets as well. The resulting set of tables and actions replicated is the union of the sets the table is in. The tables are not replicated until they are added into a replication set. There are three preexisting replication sets named "default", "default_insert_only" and "ddl_sql". The "default" replication set is defined to replicate all changes to tables in it. The "default_insert_only" only replicates INSERTs and is meant for tables that don't have primary key (see [Limitations](#primary-key-required) section for details). The "ddl_sql" replication set is defined to replicate schema changes specified by `pglogical.replicate_ddl_command` The following functions are provided for managing the replication sets: - `pglogical.create_replication_set(set_name name, replicate_insert bool, replicate_update bool, replicate_delete bool, replicate_truncate bool)` This function creates a new replication set. Parameters: - `set_name` - name of the set, must be unique - `replicate_insert` - specifies if `INSERT` is replicated, default true - `replicate_update` - specifies if `UPDATE` is replicated, default true - `replicate_delete` - specifies if `DELETE` is replicated, default true - `replicate_truncate` - specifies if `TRUNCATE` is replicated, default true - `pglogical.alter_replication_set(set_name name, replicate_inserts bool, replicate_updates bool, replicate_deletes bool, replicate_truncate bool)` This function changes the parameters of the existing replication set. 
Parameters: - `set_name` - name of the existing replication set - `replicate_insert` - specifies if `INSERT` is replicated, default true - `replicate_update` - specifies if `UPDATE` is replicated, default true - `replicate_delete` - specifies if `DELETE` is replicated, default true - `replicate_truncate` - specifies if `TRUNCATE` is replicated, default true - `pglogical.drop_replication_set(set_name text)` Removes the replication set. Parameters: - `set_name` - name of the existing replication set - `pglogical.replication_set_add_table(set_name name, relation regclass, synchronize_data boolean, columns text[], row_filter text)` Adds a table to replication set. Parameters: - `set_name` - name of the existing replication set - `relation` - name or OID of the table to be added to the set - `synchronize_data` - if true, the table data is synchronized on all subscribers which are subscribed to given replication set, default false - `columns` - list of columns to replicate. Normally when all columns should be replicated, this will be set to NULL which is the default - `row_filter` - row filtering expression, default NULL (no filtering), see [Row Filtering](#row-filtering) for more info. **WARNING: Use caution when synchronizing data with a valid row filter.** Using `synchronize_data=true` with a valid `row_filter` is like a one-time operation for a table. Executing it again with modified `row_filter` won't synchronize data to subscriber. Subscribers may need to call `pglogical.alter_subscription_resynchronize_table()` to fix it. - `pglogical.replication_set_add_all_tables(set_name name, schema_names text[], synchronize_data boolean)` Adds all tables in given schemas. Only existing tables are added, table that will be created in future will not be added automatically. For how to ensure that tables created in future are added to correct replication set, see [Automatic assignment of replication sets for new tables](#automatic-assignment-of-replication-sets-for-new-tables). 
Parameters: - `set_name` - name of the existing replication set - `schema_names` - array of names name of existing schemas from which tables should be added - `synchronize_data` - if true, the table data is synchronized on all subscribers which are subscribed to given replication set, default false - `pglogical.replication_set_remove_table(set_name name, relation regclass)` Remove a table from replication set. Parameters: - `set_name` - name of the existing replication set - `relation` - name or OID of the table to be removed from the set - `pglogical.replication_set_add_sequence(set_name name, relation regclass, synchronize_data boolean)` Adds a sequence to a replication set. Parameters: - `set_name` - name of the existing replication set - `relation` - name or OID of the sequence to be added to the set - `synchronize_data` - if true, the sequence value will be synchronized immediately, default false - `pglogical.replication_set_add_all_sequences(set_name name, schema_names text[], synchronize_data boolean)` Adds all sequences from the given schemas. Only existing sequences are added, any sequences that will be created in future will not be added automatically. Parameters: - `set_name` - name of the existing replication set - `schema_names` - array of names name of existing schemas from which tables should be added - `synchronize_data` - if true, the sequence value will be synchronized immediately, default false - `pglogical.replication_set_remove_sequence(set_name name, relation regclass)` Remove a sequence from a replication set. Parameters: - `set_name` - name of the existing replication set - `relation` - name or OID of the sequence to be removed from the set You can view the information about which table is in which set by querying the `pglogical.tables` view. #### Automatic assignment of replication sets for new tables The event trigger facility can be used for describing rules which define replication sets for newly created tables. 
Example: CREATE OR REPLACE FUNCTION pglogical_assign_repset() RETURNS event_trigger AS $$ DECLARE obj record; BEGIN FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands() LOOP IF obj.object_type = 'table' THEN IF obj.schema_name = 'config' THEN PERFORM pglogical.replication_set_add_table('configuration', obj.objid); ELSIF NOT obj.in_extension THEN PERFORM pglogical.replication_set_add_table('default', obj.objid); END IF; END IF; END LOOP; END; $$ LANGUAGE plpgsql; CREATE EVENT TRIGGER pglogical_assign_repset_trg ON ddl_command_end WHEN TAG IN ('CREATE TABLE', 'CREATE TABLE AS') EXECUTE PROCEDURE pglogical_assign_repset(); The above example will put all new tables created in schema `config` into replication set `configuration` and all other new tables which are not created by extensions will go to `default` replication set. ### Additional functions - `pglogical.replicate_ddl_command(command text, replication_sets text[])` Execute locally and then send the specified command to the replication queue for execution on subscribers which are subscribed to one of the specified `replication_sets`. Parameters: - `command` - DDL query to execute - `replication_sets` - array of replication sets which this command should be associated with, default "{ddl_sql}" - `pglogical.synchronize_sequence(relation regclass)` Push sequence state to all subscribers. Unlike the subscription and table synchronization function, this function should be run on provider. It forces update of the tracked sequence state which will be consumed by all subscribers (replication set filtering still applies) once they replicate the transaction in which this function has been executed. Parameters: - `relation` - name of existing sequence, optionally qualified ### Row Filtering PGLogical allows row based filtering both on provider side and the subscriber side. 
#### Row Filtering on Provider On the provider the row filtering can be done by specifying `row_filter` parameter for the `pglogical.replication_set_add_table` function. The `row_filter` is normal PostgreSQL expression which has the same limitations on what's allowed as the `CHECK` constraint. Simple `row_filter` would look something like `row_filter := 'id > 0'` which would ensure that only rows where values of `id` column is bigger than zero will be replicated. It's allowed to use volatile function inside `row_filter` but caution must be exercised with regard to writes as any expression which will do writes will throw error and stop replication. It's also worth noting that the `row_filter` is running inside the replication session so session specific expressions such as `CURRENT_USER` will have values of the replication session and not the session which did the writes. #### Row Filtering on Subscriber On the subscriber the row based filtering can be implemented using standard `BEFORE TRIGGER` mechanism. It is required to mark any such triggers as either `ENABLE REPLICA` or `ENABLE ALWAYS` otherwise they will not be executed by the replication process. ## Synchronous Replication Synchronous replication is supported using same standard mechanism provided by PostgreSQL for physical replication. The `synchronous_commit` and `synchronous_standby_names` settings will affect when `COMMIT` command reports success to client if pglogical subscription name is used in `synchronous_standby_names`. Refer to PostgreSQL documentation for more info about how to configure these two variables. ## Conflicts In case the node is subscribed to multiple providers, or when local writes happen on a subscriber, conflicts can arise for the incoming changes. These are automatically detected and can be acted on depending on the configuration. The configuration of the conflicts resolver is done via the `pglogical.conflict_resolution` setting. 
The resolved conflicts are logged using the log level set using `pglogical.conflict_log_level`. This parameter defaults to `LOG`. If set to lower level than `log_min_messages` the resolved conflicts won't appear in the server log. ## Configuration options Some aspects of PGLogical can be configured using configuration options that can be either set in `postgresql.conf` or via `ALTER SYSTEM SET`. - `pglogical.conflict_resolution` Sets the resolution method for any detected conflicts between local data and incoming changes. Possible values: - `error` - the replication will stop on error if conflict is detected and manual action is needed for resolving - `apply_remote` - always apply the change that's conflicting with local data - `keep_local` - keep the local version of the data and ignore the conflicting change that is coming from the remote node - `last_update_wins` - the version of data with newest commit timestamp will be kept (this can be either local or remote version) - `first_update_wins` - the version of the data with oldest timestamp will be kept (this can be either local or remote version) The available settings and defaults depend on version of PostgreSQL and other settings. The default value in PostgreSQL is `apply_remote`. The `keep_local`, `last_update_wins` and `first_update_wins` settings require `track_commit_timestamp` PostgreSQL setting to be enabled. As `track_commit_timestamp` is not available in PostgreSQL 9.4 `pglogical.conflict_resolution` can only be `apply_remote` or `error`. In Postgres-XL, the only supported value and the default is `error`. - `pglogical.conflict_log_level` Sets the log level for reporting detected conflicts when the `pglogical.conflict_resolution` is set to anything else than `error`. Main use for this setting is to suppress logging of conflicts. Possible values are same as for `log_min_messages` PostgreSQL setting. The default is `LOG`. 
- `pglogical.batch_inserts` Tells PGLogical to use batch insert mechanism if possible. Batch mechanism uses PostgreSQL internal batch insert mode which is also used by `COPY` command. The batch inserts will improve replication performance of transactions that did many inserts into one table. PGLogical will switch to batch mode when transaction did more than 5 INSERTs. It's only possible to switch to batch mode when there are no `INSTEAD OF INSERT` and `BEFORE INSERT` triggers on the table and when there are no defaults with volatile expressions for columns of the table. Also the batch mode will only work when `pglogical.conflict_resolution` is set to `error`. The default is `true`. - `pglogical.use_spi` Tells PGLogical to use SPI interface to form actual SQL (`INSERT`, `UPDATE`, `DELETE`) statements to apply incoming changes instead of using internal low level interface. This is mainly useful for Postgres-XL and debugging purposes. The default in PostgreSQL is `false`. This can be set to `true` only when `pglogical.conflict_resolution` is set to `error`. In this state, conflicts are not detected. In Postgres-XL the default and only allowed setting is `true`. - `pglogical.temp_directory` Defines system path where to put temporary files needed for schema synchronization. This path need to exist and be writable by user running Postgres. Default is empty, which tells PGLogical to use default temporary directory based on environment and operating system settings. ## Limitations and restrictions ### Superuser is required Currently pglogical replication and administration requires superuser privileges. It may be later extended to more granular privileges. ### `UNLOGGED` and `TEMPORARY` not replicated `UNLOGGED` and `TEMPORARY` tables will not and cannot be replicated, much like with physical streaming replication. ### One database at a time To replicate multiple databases you must set up individual provider/subscriber relationships for each. 
There is no way to configure replication for all databases in a PostgreSQL install at once. ### PRIMARY KEY or REPLICA IDENTITY required `UPDATE`s and `DELETE`s cannot be replicated for tables that lack a `PRIMARY KEY` or other valid replica identity such as using an index, which must be unique, not partial, not deferrable, and include only columns marked NOT NULL. Replication has no way to find the tuple that should be updated/deleted since there is no unique identifier. `REPLICA IDENTITY FULL` is not supported yet. ### Only one unique index/constraint/PK If more than one upstream is configured or the downstream accepts local writes then only one `UNIQUE` index should be present on downstream replicated tables. Conflict resolution can only use one index at a time so conflicting rows may `ERROR` if a row satisfies the `PRIMARY KEY` but violates a `UNIQUE` constraint on the downstream side. This will stop replication until the downstream table is modified to remove the violation. It's fine to have extra unique constraints on an upstream if the downstream only gets writes from that upstream and nowhere else. The rule is that the downstream constraints must *not be more restrictive* than those on the upstream(s). Partial secondary unique indexes are permitted, but will be ignored for conflict resolution purposes. ### Unique constraints must not be deferrable On the downstream end pglogical does not support index-based constraints defined as `DEFERRABLE`. It will emit the error ERROR: pglogical doesn't support index rechecks needed for deferrable indexes DETAIL: relation "public"."test_relation" has deferrable indexes: "index1", "index2" if such an index is present when it attempts to apply changes to a table. ### DDL Automatic DDL replication is not supported. Managing DDL so that the provider and subscriber database(s) remain compatible is the responsibility of the user. 
pglogical provides the `pglogical.replicate_ddl_command` function to allow DDL to be run on the provider and subscriber at a consistent point. ### No replication queue flush There's no support for freezing transactions on the master and waiting until all pending queued xacts are replayed from slots. Support for making the upstream read-only for this will be added in a future release. This means that care must be taken when applying table structure changes. If there are committed transactions that aren't yet replicated and the table structure of the provider and subscriber are changed at the same time in a way that makes the subscriber table incompatible with the queued transactions replication will stop. Administrators should either ensure that writes to the master are stopped before making schema changes, or use the `pglogical.replicate_ddl_command` function to queue schema changes so they're replayed at a consistent point on the replica. Once multi-master replication support is added then using `pglogical.replicate_ddl_command` will not be enough, as the subscriber may be generating new xacts with the old structure after the schema change is committed on the publisher. Users will have to ensure writes are stopped on all nodes and all slots are caught up before making schema changes. ### FOREIGN KEYS Foreign keys constraints are not enforced for the replication process - what succeeds on provider side gets applied to subscriber even if the `FOREIGN KEY` would be violated. ### TRUNCATE Using `TRUNCATE ... CASCADE` will only apply the `CASCADE` option on the provider side. (Properly handling this would probably require the addition of `ON TRUNCATE CASCADE` support for foreign keys in PostgreSQL). `TRUNCATE ... RESTART IDENTITY` is not supported. The identity restart step is not replicated to the replica. ### Sequences The state of sequences added to replication sets is replicated periodically and not in real-time. 
Dynamic buffer is used for the value being replicated so that the subscribers actually receive future state of the sequence. This minimizes the chance of subscriber's notion of sequence's `last_value` falling behind but does not completely eliminate the possibility. It might be desirable to call `synchronize_sequence` to ensure all subscribers have up to date information about given sequence after "big events" in the database such as data loading or during the online upgrade. It's generally recommended to use `bigserial` and `bigint` types for sequences on multi-node systems as smaller sequences might reach end of the sequence space fast. Users who want to have independent sequences on provider and subscriber can avoid adding sequences to replication sets and create sequences with step interval equal to or greater than the number of nodes. And then setting a different offset on each node. Use the `INCREMENT BY` option for `CREATE SEQUENCE` or `ALTER SEQUENCE`, and use `setval(...)` to set the start point. ### Triggers Apply process and the initial COPY process both run with `session_replication_role` set to `replica` which means that `ENABLE REPLICA` and `ENABLE ALWAYS` triggers will be fired. ### PostgreSQL Version differences PGLogical can replicate across PostgreSQL major versions. Despite that, long term cross-version replication is not considered a design target, though it may often work. Issues where changes are valid on the provider but not on the subscriber are more likely to arise when replicating across versions. It is safer to replicate from an old version to a newer version since PostgreSQL maintains solid backward compatibility but only limited forward compatibility. Initial schema synchronization is only supported when replicating between same version of PostgreSQL or from lower version to higher version. Replicating between different minor versions makes no difference at all. 
### Database encoding differences PGLogical does not support replication between databases with different encoding. We recommend using `UTF-8` encoding in all replicated databases. ### Large objects PostgreSQL's logical decoding facility does not support decoding changes to large objects, so pglogical cannot replicate large objects. ### Postgres-XL Minimum supported version of Postgres-XL is 9.5r1.5. Postgres-XL is only supported as subscriber (cannot be a provider). For workloads with many small transactions the performance of replication may suffer due to increased write latency. On the other hand large insert (or bulkcopy) transactions are heavily optimized to work very fast with Postgres-XL. Also any DDL limitations apply so extra care need to be taken when using `replicate_ddl_command()`. Postgres-XL changes defaults and available settings for `pglogical.conflict_resolution` and `pglogical.use_spi` configuration options. ## Appendix A: Credits and License pglogical has been designed, developed and tested by the 2ndQuadrant team * Petr Jelinek * Craig Ringer * Simon Riggs * Pallavi Sontakke * Umair Shahid pglogical license is The PostgreSQL License pglogical copyright is donated to PostgreSQL Global Development Group ## Appendix B: Release Notes ### pglogical 2.4.1 Version 2.4.1 is a maintenance release of pglogical 2. #### Changes * Fix "snapshot still active" warnings and snapshot handling with domains. Problems introduced in version 2.4.0. * Flush error state when having failed to drop remote replication slot ### pglogical 2.4.0 Version 2.4.0 is a maintenance release of pglogical 2. 
#### Changes * Add preliminary support for PostgreSQL 14 (beta) * Fix pglogical_show_subscription_table to return row rather than set of rows * Fix snapshot handling in output plugin and apply worker * Fix command quoting on Windows so that it actually works Seems like the API used before has 1024 limit on command line length * Make sure that the schema syncing process can be interrupted on Windows * Fix compilation issues with pglogical_create_subscriber on Windows * Fix double closing of relation when a BEFORE ROW DELETE trigger returns NULL in the apply worker * Fix multi-insert crash in the SPI apply worker * Fix multi-insert doing insert of last tuple multiple times in apply worker * Make sure debug_query_string is always set Newer versions of PostgreSQL require that debug_query_string is always set. pglogical-REL2_4_1/docs/pglogical.yml000066400000000000000000000004531415142317000176000ustar00rootroot00000000000000site_name: pglogical docs_dir: . site_dir: ../docs-site theme: name: 2qmkdocs one_page: true copyright: 'Copyright 2019 © 2ndQuadrant Limited' nav: - pglogical: README.md markdown_extensions: - codehilite: css_class: highlight guess_lang: true plugins: - search pglogical-REL2_4_1/expected/000077500000000000000000000000001415142317000157635ustar00rootroot00000000000000pglogical-REL2_4_1/expected/add_table.out000066400000000000000000000416171415142317000204240ustar00rootroot00000000000000/* First test whether a table's replication set can be properly manipulated */ SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn CREATE TABLE public.test_publicschema(id serial primary key, data text); \c :subscriber_dsn CREATE TABLE public.test_publicschema(data text, id serial primary key); \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE SCHEMA "strange.schema-IS"; CREATE TABLE public.test_nosync(id serial primary key, data text); CREATE TABLE "strange.schema-IS".test_strangeschema(id serial primary key, "S0m3th1ng" timestamptz 
DEFAULT '1993-01-01 00:00:00 CET'); CREATE TABLE "strange.schema-IS".test_diff_repset(id serial primary key, data text DEFAULT ''); $$); replicate_ddl_command ----------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) -- create some replication sets SELECT * FROM pglogical.create_replication_set('repset_test'); create_replication_set ------------------------ 2154402640 (1 row) -- move tables to replication set that is not subscribed SELECT * FROM pglogical.replication_set_add_table('repset_test', 'test_publicschema'); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('repset_test', 'test_nosync'); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('repset_test', '"strange.schema-IS".test_strangeschema'); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('repset_test', '"strange.schema-IS".test_diff_repset'); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_all_sequences('repset_test', '{public}'); replication_set_add_all_sequences ----------------------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_sequence('repset_test', pg_get_serial_sequence('"strange.schema-IS".test_strangeschema', 'id')); replication_set_add_sequence ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_sequence('repset_test', pg_get_serial_sequence('"strange.schema-IS".test_diff_repset', 'id')); replication_set_add_sequence ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_all_sequences('default', '{public}'); replication_set_add_all_sequences ----------------------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_sequence('default', 
pg_get_serial_sequence('"strange.schema-IS".test_strangeschema', 'id')); replication_set_add_sequence ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_sequence('default', pg_get_serial_sequence('"strange.schema-IS".test_diff_repset', 'id')); replication_set_add_sequence ------------------------------ t (1 row) INSERT INTO public.test_publicschema(data) VALUES('a'); INSERT INTO public.test_publicschema(data) VALUES('b'); INSERT INTO public.test_nosync(data) VALUES('a'); INSERT INTO public.test_nosync(data) VALUES('b'); INSERT INTO "strange.schema-IS".test_strangeschema VALUES(DEFAULT, DEFAULT); INSERT INTO "strange.schema-IS".test_strangeschema VALUES(DEFAuLT, DEFAULT); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM public.test_publicschema; data | id ------+---- (0 rows) \c :provider_dsn -- move tables back to the subscribed replication set SELECT * FROM pglogical.replication_set_add_table('default', 'test_publicschema', true); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'test_nosync', false); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', '"strange.schema-IS".test_strangeschema', true); replication_set_add_table --------------------------- t (1 row) \c :subscriber_dsn SET statement_timeout = '20s'; SELECT pglogical.wait_for_table_sync_complete('test_subscription', 'test_publicschema'); wait_for_table_sync_complete ------------------------------ (1 row) SELECT pglogical.wait_for_table_sync_complete('test_subscription', '"strange.schema-IS".test_strangeschema'); wait_for_table_sync_complete ------------------------------ (1 row) RESET statement_timeout; SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status IN ('y', 'r') FROM pglogical.local_sync_status ORDER BY 2,3,4; 
sync_kind | sync_subid | sync_nspname | sync_relname | ?column? -----------+------------+-------------------+--------------------+---------- d | 3848008564 | public | test_publicschema | t d | 3848008564 | strange.schema-IS | test_strangeschema | t f | 3848008564 | | | t (3 rows) \c :provider_dsn DO $$ -- give it 10 seconds to synchronize the tables BEGIN FOR i IN 1..100 LOOP IF (SELECT count(1) FROM pg_replication_slots) = 1 THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; SELECT count(1) FROM pg_replication_slots; count ------- 1 (1 row) INSERT INTO public.test_publicschema VALUES(3, 'c'); INSERT INTO public.test_publicschema VALUES(4, 'd'); INSERT INTO "strange.schema-IS".test_strangeschema VALUES(3, DEFAULT); INSERT INTO "strange.schema-IS".test_strangeschema VALUES(4, DEFAULT); SELECT pglogical.synchronize_sequence(c.oid) FROM pg_class c, pg_namespace n WHERE c.relkind = 'S' AND c.relnamespace = n.oid AND n.nspname IN ('public', 'strange.schema-IS'); synchronize_sequence ---------------------- t t t t (4 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM public.test_publicschema; data | id ------+---- a | 1 b | 2 c | 3 d | 4 (4 rows) SELECT * FROM "strange.schema-IS".test_strangeschema; id | S0m3th1ng ----+------------------------------ 1 | Thu Dec 31 15:00:00 1992 PST 2 | Thu Dec 31 15:00:00 1992 PST 3 | Thu Dec 31 15:00:00 1992 PST 4 | Thu Dec 31 15:00:00 1992 PST (4 rows) SELECT * FROM pglogical.alter_subscription_synchronize('test_subscription'); alter_subscription_synchronize -------------------------------- t (1 row) BEGIN; SET statement_timeout = '20s'; SELECT pglogical.wait_for_table_sync_complete('test_subscription', 'test_nosync'); wait_for_table_sync_complete ------------------------------ (1 row) COMMIT; SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status IN ('y', 'r') FROM pglogical.local_sync_status ORDER BY 2,3,4; 
sync_kind | sync_subid | sync_nspname | sync_relname | ?column? -----------+------------+-------------------+--------------------+---------- d | 3848008564 | public | test_nosync | t d | 3848008564 | public | test_publicschema | t d | 3848008564 | strange.schema-IS | test_strangeschema | t f | 3848008564 | | | t (4 rows) SELECT * FROM public.test_nosync; id | data ----+------ 1 | a 2 | b (2 rows) DELETE FROM public.test_publicschema WHERE id > 1; SELECT * FROM public.test_publicschema; data | id ------+---- a | 1 (1 row) SELECT * FROM pglogical.alter_subscription_resynchronize_table('test_subscription', 'test_publicschema'); alter_subscription_resynchronize_table ---------------------------------------- t (1 row) BEGIN; SET statement_timeout = '20s'; SELECT pglogical.wait_for_table_sync_complete('test_subscription', 'test_publicschema'); wait_for_table_sync_complete ------------------------------ (1 row) COMMIT; SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status IN ('y', 'r') FROM pglogical.local_sync_status ORDER BY 2,3,4; sync_kind | sync_subid | sync_nspname | sync_relname | ?column? -----------+------------+-------------------+--------------------+---------- d | 3848008564 | public | test_nosync | t d | 3848008564 | public | test_publicschema | t d | 3848008564 | strange.schema-IS | test_strangeschema | t f | 3848008564 | | | t (4 rows) SELECT * FROM public.test_publicschema; data | id ------+---- a | 1 b | 2 c | 3 d | 4 (4 rows) \x SELECT nspname, relname, status IN ('synchronized', 'replicating') FROM pglogical.show_subscription_table('test_subscription', 'test_publicschema'); -[ RECORD 1 ]--------------- nspname | public relname | test_publicschema ?column? 
| t \x BEGIN; SELECT * FROM pglogical.alter_subscription_add_replication_set('test_subscription', 'repset_test'); alter_subscription_add_replication_set ---------------------------------------- t (1 row) SELECT * FROM pglogical.alter_subscription_remove_replication_set('test_subscription', 'default'); alter_subscription_remove_replication_set ------------------------------------------- t (1 row) COMMIT; DO $$ BEGIN FOR i IN 1..100 LOOP IF EXISTS (SELECT 1 FROM pglogical.show_subscription_status() WHERE status = 'replicating') THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; \c :provider_dsn SELECT * FROM pglogical.replication_set_remove_table('repset_test', '"strange.schema-IS".test_strangeschema'); replication_set_remove_table ------------------------------ t (1 row) INSERT INTO "strange.schema-IS".test_diff_repset VALUES(1); INSERT INTO "strange.schema-IS".test_diff_repset VALUES(2); INSERT INTO "strange.schema-IS".test_strangeschema VALUES(5, DEFAULT); INSERT INTO "strange.schema-IS".test_strangeschema VALUES(6, DEFAULT); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM "strange.schema-IS".test_diff_repset; id | data ----+------ 1 | 2 | (2 rows) SELECT * FROM "strange.schema-IS".test_strangeschema; id | S0m3th1ng ----+------------------------------ 1 | Thu Dec 31 15:00:00 1992 PST 2 | Thu Dec 31 15:00:00 1992 PST 3 | Thu Dec 31 15:00:00 1992 PST 4 | Thu Dec 31 15:00:00 1992 PST (4 rows) \c :provider_dsn SELECT * FROM pglogical.alter_replication_set('repset_test', replicate_insert := false, replicate_update := false, replicate_delete := false, replicate_truncate := false); alter_replication_set ----------------------- 2154402640 (1 row) INSERT INTO "strange.schema-IS".test_diff_repset VALUES(3); INSERT INTO "strange.schema-IS".test_diff_repset VALUES(4); UPDATE "strange.schema-IS".test_diff_repset SET data = 'data'; DELETE FROM 
"strange.schema-IS".test_diff_repset WHERE id < 3; TRUNCATE "strange.schema-IS".test_diff_repset; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM "strange.schema-IS".test_diff_repset; id | data ----+------ 3 | data 4 | data (2 rows) \c :provider_dsn SELECT * FROM pglogical.alter_replication_set('repset_test', replicate_insert := true, replicate_truncate := true); alter_replication_set ----------------------- 2154402640 (1 row) INSERT INTO "strange.schema-IS".test_diff_repset VALUES(5); INSERT INTO "strange.schema-IS".test_diff_repset VALUES(6); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM "strange.schema-IS".test_diff_repset; id | data ----+------ 3 | data 4 | data 5 | 6 | (4 rows) \c :provider_dsn TRUNCATE "strange.schema-IS".test_diff_repset; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM "strange.schema-IS".test_diff_repset; id | data ----+------ (0 rows) SELECT * FROM pglogical.alter_subscription_add_replication_set('test_subscription', 'default'); alter_subscription_add_replication_set ---------------------------------------- t (1 row) DO $$ BEGIN FOR i IN 1..100 LOOP IF EXISTS (SELECT 1 FROM pglogical.show_subscription_status() WHERE status = 'replicating') THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; SELECT N.nspname AS schemaname, C.relname AS tablename, (nextval(C.oid) > 1000) as synced FROM pg_class C JOIN pg_namespace N ON (N.oid = C.relnamespace) WHERE C.relkind = 'S' AND N.nspname IN ('public', 'strange.schema-IS') ORDER BY 1, 2; schemaname | tablename | synced -------------------+---------------------------+-------- public | test_nosync_id_seq | t public | test_publicschema_id_seq | t strange.schema-IS | test_diff_repset_id_seq | t strange.schema-IS | 
test_strangeschema_id_seq | t (4 rows) \c :provider_dsn DO $$ BEGIN FOR i IN 1..100 LOOP IF EXISTS (SELECT 1 FROM pg_stat_replication) THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.test_publicschema CASCADE; DROP TABLE public.test_nosync CASCADE; DROP SCHEMA "strange.schema-IS" CASCADE; $$); NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to table "strange.schema-IS".test_diff_repset membership in replication set repset_test NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to table "strange.schema-IS".test_strangeschema membership in replication set default replicate_ddl_command ----------------------- t (1 row) SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.synctest(a int primary key, b text); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('repset_test', 'synctest', synchronize_data := false); replication_set_add_table --------------------------- t (1 row) INSERT INTO synctest VALUES (1, '1'); -- no way to see if this worked currently, but if one can manually check -- if there is conflict in log or not (conflict = bad here) SELECT pglogical.replicate_ddl_command($$ SELECT pg_sleep(5); UPDATE public.synctest SET b = md5(a::text); $$); replicate_ddl_command ----------------------- t (1 row) INSERT INTO synctest VALUES (2, '2'); \c :subscriber_dsn SELECT * FROM pglogical.alter_subscription_resynchronize_table('test_subscription', 'synctest'); alter_subscription_resynchronize_table ---------------------------------------- t (1 row) BEGIN; SET statement_timeout = '20s'; SELECT pglogical.wait_for_table_sync_complete('test_subscription', 'synctest'); 
wait_for_table_sync_complete ------------------------------ (1 row) COMMIT; \c :provider_dsn SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) SELECT * FROM synctest; a | b ---+---------------------------------- 1 | c4ca4238a0b923820dcc509a6f75849b 2 | 2 (2 rows) \c :subscriber_dsn SELECT * FROM synctest; a | b ---+---------------------------------- 1 | c4ca4238a0b923820dcc509a6f75849b 2 | 2 (2 rows) \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.synctest CASCADE; $$); NOTICE: drop cascades to table public.synctest membership in replication set repset_test replicate_ddl_command ----------------------- t (1 row) \c :subscriber_dsn -- this is to reorder repsets to default order BEGIN; SELECT * FROM pglogical.alter_subscription_remove_replication_set('test_subscription', 'default'); alter_subscription_remove_replication_set ------------------------------------------- t (1 row) SELECT * FROM pglogical.alter_subscription_remove_replication_set('test_subscription', 'ddl_sql'); alter_subscription_remove_replication_set ------------------------------------------- t (1 row) SELECT * FROM pglogical.alter_subscription_remove_replication_set('test_subscription', 'default_insert_only'); alter_subscription_remove_replication_set ------------------------------------------- t (1 row) SELECT * FROM pglogical.alter_subscription_remove_replication_set('test_subscription', 'repset_test'); alter_subscription_remove_replication_set ------------------------------------------- t (1 row) SELECT * FROM pglogical.alter_subscription_add_replication_set('test_subscription', 'default'); alter_subscription_add_replication_set ---------------------------------------- t (1 row) SELECT * FROM pglogical.alter_subscription_add_replication_set('test_subscription', 'default_insert_only'); alter_subscription_add_replication_set ---------------------------------------- t (1 row) SELECT * FROM 
pglogical.alter_subscription_add_replication_set('test_subscription', 'ddl_sql'); alter_subscription_add_replication_set ---------------------------------------- t (1 row) COMMIT; pglogical-REL2_4_1/expected/apply_delay.out000066400000000000000000000111571415142317000210240ustar00rootroot00000000000000SELECT * FROM pglogical_regress_variables() \gset \c :subscriber_dsn GRANT ALL ON SCHEMA public TO nonsuper; SELECT E'\'' || current_database() || E'\'' AS subdb; subdb ------------ 'postgres' (1 row) \gset \c :provider_dsn SELECT * FROM pglogical.create_replication_set('delay'); create_replication_set ------------------------ 3064111751 (1 row) \c :subscriber_dsn CREATE or REPLACE function int2interval (x integer) returns interval as $$ select $1*'1 sec'::interval $$ language sql; SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription_delay', provider_dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=super', replication_sets := '{delay}', forward_origins := '{}', synchronize_structure := false, synchronize_data := false, apply_delay := int2interval(2) -- 2 seconds ); create_subscription --------------------- 1550781037 (1 row) BEGIN; SET LOCAL statement_timeout = '30s'; SELECT pglogical.wait_for_subscription_sync_complete('test_subscription_delay'); wait_for_subscription_sync_complete ------------------------------------- (1 row) COMMIT; SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status IN ('y', 'r') FROM pglogical.local_sync_status ORDER BY 2,3,4; sync_kind | sync_subid | sync_nspname | sync_relname | ?column? 
-----------+------------+--------------+--------------+---------- i | 1550781037 | | | t f | 3848008564 | | | t (2 rows) SELECT status FROM pglogical.show_subscription_status() WHERE subscription_name = 'test_subscription_delay'; status ------------- replicating (1 row) -- Make sure we see the slot and active connection \c :provider_dsn SELECT plugin, slot_type, database, active FROM pg_replication_slots; plugin | slot_type | database | active ------------------+-----------+------------+-------- pglogical_output | logical | regression | t pglogical_output | logical | regression | t (2 rows) SELECT count(*) FROM pg_stat_replication; count ------- 2 (1 row) CREATE TABLE public.timestamps ( id text primary key, ts timestamptz ); SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.basic_dml1 ( id serial primary key, other integer, data text, something interval ); $$); replicate_ddl_command ----------------------- t (1 row) -- clear old applies, from any previous tests etc. SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) INSERT INTO timestamps VALUES ('ts1', CURRENT_TIMESTAMP); SELECT * FROM pglogical.replication_set_add_table('delay', 'basic_dml1'); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) INSERT INTO timestamps VALUES ('ts2', CURRENT_TIMESTAMP); INSERT INTO basic_dml1(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) INSERT INTO timestamps VALUES ('ts3', CURRENT_TIMESTAMP); SELECT round (EXTRACT(EPOCH FROM (SELECT ts from timestamps where id = 'ts2')) - EXTRACT(EPOCH FROM (SELECT ts from timestamps where id = 'ts1'))) :: 
integer >= 2 as ddl_replication_delayed; ddl_replication_delayed ------------------------- t (1 row) SELECT round (EXTRACT(EPOCH FROM (SELECT ts from timestamps where id = 'ts3')) - EXTRACT(EPOCH FROM (SELECT ts from timestamps where id = 'ts2'))) :: integer >= 2 as inserts_replication_delayed; inserts_replication_delayed ----------------------------- t (1 row) \c :subscriber_dsn SELECT * FROM basic_dml1; id | other | data | something ----+-------+------+------------------ 1 | 5 | foo | @ 1 min 2 | 4 | bar | @ 84 days 3 | 3 | baz | @ 2 years 1 hour 4 | 2 | qux | @ 8 mons 2 days 5 | 1 | | (5 rows) SELECT pglogical.drop_subscription('test_subscription_delay'); drop_subscription ------------------- 1 (1 row) \c :provider_dsn \set VERBOSITY terse SELECT * FROM pglogical.drop_replication_set('delay'); drop_replication_set ---------------------- t (1 row) DROP TABLE public.timestamps CASCADE; SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.basic_dml1 CASCADE; $$); replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/att_list.out000066400000000000000000000204001415142317000203330ustar00rootroot00000000000000-- basic builtin datatypes SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn CREATE TABLE public.basic_dml ( id serial primary key, other integer, data text, something interval ); -- fails as primary key is not included SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', columns := '{ data, something}'); ERROR: REPLICA IDENTITY columns must be replicated SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', columns := '{id, data, something}'); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn CREATE TABLE public.basic_dml ( id serial primary key, data text, something interval, subonly integer, subonly_def integer DEFAULT 99 ); \c 
:provider_dsn -- check basic insert replication INSERT INTO basic_dml(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, data, something FROM basic_dml ORDER BY id; id | data | something ----+------+------------------ 1 | foo | @ 1 min 2 | bar | @ 84 days 3 | baz | @ 2 years 1 hour 4 | qux | @ 8 mons 2 days 5 | | (5 rows) -- update one row \c :provider_dsn UPDATE basic_dml SET other = '4', data = NULL, something = '3 days'::interval WHERE id = 4; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, data, something FROM basic_dml ORDER BY id; id | data | something ----+------+------------------ 1 | foo | @ 1 min 2 | bar | @ 84 days 3 | baz | @ 2 years 1 hour 4 | | @ 3 days 5 | | (5 rows) -- update multiple rows \c :provider_dsn SELECT * FROM basic_dml order by id; id | other | data | something ----+-------+------+------------------ 1 | 5 | foo | @ 1 min 2 | 4 | bar | @ 84 days 3 | 3 | baz | @ 2 years 1 hour 4 | 4 | | @ 3 days 5 | 1 | | (5 rows) UPDATE basic_dml SET data = data || other::text; SELECT * FROM basic_dml order by id; id | other | data | something ----+-------+------+------------------ 1 | 5 | foo5 | @ 1 min 2 | 4 | bar4 | @ 84 days 3 | 3 | baz3 | @ 2 years 1 hour 4 | 4 | | @ 3 days 5 | 1 | | (5 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, data, something FROM basic_dml ORDER BY id; id | data | something ----+------+------------------ 1 | foo5 | @ 1 min 2 | bar4 | @ 84 days 3 | baz3 | @ 2 years 1 hour 4 | | @ 3 days 5 | | (5 rows) \c :provider_dsn UPDATE basic_dml SET other = id, data = data || 
id::text; SELECT * FROM basic_dml order by id; id | other | data | something ----+-------+-------+------------------ 1 | 1 | foo51 | @ 1 min 2 | 2 | bar42 | @ 84 days 3 | 3 | baz33 | @ 2 years 1 hour 4 | 4 | | @ 3 days 5 | 5 | | (5 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, data, something FROM basic_dml ORDER BY id; id | data | something ----+-------+------------------ 1 | foo51 | @ 1 min 2 | bar42 | @ 84 days 3 | baz33 | @ 2 years 1 hour 4 | | @ 3 days 5 | | (5 rows) \c :provider_dsn UPDATE basic_dml SET other = id, something = something - '10 seconds'::interval WHERE id < 3; UPDATE basic_dml SET other = id, something = something + '10 seconds'::interval WHERE id > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, data, something, subonly, subonly_def FROM basic_dml ORDER BY id; id | data | something | subonly | subonly_def ----+-------+--------------------+---------+------------- 1 | foo51 | @ 50 secs | | 99 2 | bar42 | @ 84 days -10 secs | | 99 3 | baz33 | @ 2 years 1 hour | | 99 4 | | @ 3 days 10 secs | | 99 5 | | | | 99 (5 rows) -- delete one row \c :provider_dsn DELETE FROM basic_dml WHERE id = 2; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, data, something FROM basic_dml ORDER BY id; id | data | something ----+-------+------------------ 1 | foo51 | @ 50 secs 3 | baz33 | @ 2 years 1 hour 4 | | @ 3 days 10 secs 5 | | (4 rows) -- delete multiple rows \c :provider_dsn DELETE FROM basic_dml WHERE id < 4; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, data, something FROM basic_dml ORDER BY id; id | data | something ----+------+------------------ 4 | | @ 3 days 10 secs 5 | | (2 rows) -- truncate \c 
:provider_dsn TRUNCATE basic_dml; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, data, something FROM basic_dml ORDER BY id; id | data | something ----+------+----------- (0 rows) -- copy \c :provider_dsn \COPY basic_dml FROM STDIN WITH CSV SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, data, something FROM basic_dml ORDER BY id; id | data | something ------+------+----------- 9000 | aaa | @ 1 hour 9001 | bbb | @ 2 years 9002 | ccc | @ 3 mins 9003 | ddd | @ 4 days (4 rows) \c :provider_dsn -- drop columns being filtered at provider -- even primary key can be dropped ALTER TABLE basic_dml DROP COLUMN id; ALTER TABLE basic_dml DROP COLUMN data; \c :subscriber_dsn SELECT id, data, something FROM basic_dml ORDER BY id; id | data | something ------+------+----------- 9000 | aaa | @ 1 hour 9001 | bbb | @ 2 years 9002 | ccc | @ 3 mins 9003 | ddd | @ 4 days (4 rows) \c :provider_dsn -- add column to table at provider ALTER TABLE basic_dml ADD COLUMN data1 text; INSERT INTO basic_dml(other, data1, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval); -- inserts after dropping primary key still reach the subscriber. 
UPDATE basic_dml set something = something - '10 seconds'::interval; DELETE FROM basic_dml WHERE other = 2; SELECT * FROM basic_dml ORDER BY other; other | something | data1 -------+--------------------+------- 1 | @ 59 mins 50 secs | 3 | @ 2 mins 50 secs | 4 | @ 4 days -10 secs | 4 | @ 84 days -10 secs | bar 5 | @ 50 secs | foo (5 rows) SELECT nspname, relname, att_list, has_row_filter FROM pglogical.show_repset_table_info('basic_dml', ARRAY['default']); nspname | relname | att_list | has_row_filter ---------+-----------+-------------+---------------- public | basic_dml | {something} | f (1 row) \c :subscriber_dsn -- verify that columns are not automatically added for filtering unless told so. SELECT * FROM pglogical.show_subscription_table('test_subscription', 'basic_dml'); nspname | relname | status ---------+-----------+--------- public | basic_dml | unknown (1 row) SELECT * FROM basic_dml ORDER BY id; id | data | something | subonly | subonly_def ------+------+-----------+---------+------------- 1 | | @ 1 min | | 99 2 | | @ 84 days | | 99 9000 | aaa | @ 1 hour | | 99 9001 | bbb | @ 2 years | | 99 9002 | ccc | @ 3 mins | | 99 9003 | ddd | @ 4 days | | 99 (6 rows) \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.basic_dml CASCADE; $$); NOTICE: drop cascades to table public.basic_dml membership in replication set default replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/basic.out000066400000000000000000000127151415142317000176030ustar00rootroot00000000000000-- basic builtin datatypes SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.basic_dml ( id serial primary key, other integer, data text, something interval ); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml'); replication_set_add_table --------------------------- t 
(1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn ALTER TABLE public.basic_dml ADD COLUMN subonly integer; ALTER TABLE public.basic_dml ADD COLUMN subonly_def integer DEFAULT 99; \c :provider_dsn -- check basic insert replication INSERT INTO basic_dml(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, something, subonly, subonly_def FROM basic_dml ORDER BY id; id | other | data | something | subonly | subonly_def ----+-------+------+------------------+---------+------------- 1 | 5 | foo | @ 1 min | | 99 2 | 4 | bar | @ 84 days | | 99 3 | 3 | baz | @ 2 years 1 hour | | 99 4 | 2 | qux | @ 8 mons 2 days | | 99 5 | 1 | | | | 99 (5 rows) -- update one row \c :provider_dsn UPDATE basic_dml SET other = '4', data = NULL, something = '3 days'::interval WHERE id = 4; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, something FROM basic_dml ORDER BY id; id | other | data | something ----+-------+------+------------------ 1 | 5 | foo | @ 1 min 2 | 4 | bar | @ 84 days 3 | 3 | baz | @ 2 years 1 hour 4 | 4 | | @ 3 days 5 | 1 | | (5 rows) -- update multiple rows \c :provider_dsn UPDATE basic_dml SET other = id, data = data || id::text; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, something FROM basic_dml ORDER BY id; id | other | data | something ----+-------+------+------------------ 1 | 1 | foo1 | @ 1 min 2 | 2 | bar2 | @ 84 days 3 | 3 | baz3 | @ 2 years 1 hour 4 | 4 | | @ 3 days 5 | 5 | | (5 rows) \c 
:provider_dsn UPDATE basic_dml SET other = id, something = something - '10 seconds'::interval WHERE id < 3; UPDATE basic_dml SET other = id, something = something + '10 seconds'::interval WHERE id > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, something, subonly, subonly_def FROM basic_dml ORDER BY id; id | other | data | something | subonly | subonly_def ----+-------+------+--------------------+---------+------------- 1 | 1 | foo1 | @ 50 secs | | 99 2 | 2 | bar2 | @ 84 days -10 secs | | 99 3 | 3 | baz3 | @ 2 years 1 hour | | 99 4 | 4 | | @ 3 days 10 secs | | 99 5 | 5 | | | | 99 (5 rows) -- delete one row \c :provider_dsn DELETE FROM basic_dml WHERE id = 2; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, something FROM basic_dml ORDER BY id; id | other | data | something ----+-------+------+------------------ 1 | 1 | foo1 | @ 50 secs 3 | 3 | baz3 | @ 2 years 1 hour 4 | 4 | | @ 3 days 10 secs 5 | 5 | | (4 rows) -- delete multiple rows \c :provider_dsn DELETE FROM basic_dml WHERE id < 4; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, something FROM basic_dml ORDER BY id; id | other | data | something ----+-------+------+------------------ 4 | 4 | | @ 3 days 10 secs 5 | 5 | | (2 rows) -- truncate \c :provider_dsn TRUNCATE basic_dml; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, something FROM basic_dml ORDER BY id; id | other | data | something ----+-------+------+----------- (0 rows) -- copy \c :provider_dsn \COPY basic_dml FROM STDIN WITH CSV SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c 
:subscriber_dsn SELECT id, other, data, something FROM basic_dml ORDER BY id; id | other | data | something ------+-------+------+----------- 9000 | 1 | aaa | @ 1 hour 9001 | 2 | bbb | @ 2 years 9002 | 3 | ccc | @ 3 mins 9003 | 4 | ddd | @ 4 days (4 rows) \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.basic_dml CASCADE; $$); NOTICE: drop cascades to table public.basic_dml membership in replication set default replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/bidirectional.out000066400000000000000000000071751415142317000213360ustar00rootroot00000000000000SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT E'\'' || current_database() || E'\'' AS pubdb; pubdb -------------- 'regression' (1 row) \gset \c :provider_dsn DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') = 904 THEN CREATE EXTENSION IF NOT EXISTS pglogical_origin; END IF; END;$$; SELECT * FROM pglogical.create_subscription( subscription_name := 'test_bidirectional', provider_dsn := (SELECT subscriber_dsn FROM pglogical_regress_variables()) || ' user=super', synchronize_structure := false, synchronize_data := false, forward_origins := '{}'); create_subscription --------------------- 4269973126 (1 row) BEGIN; SET LOCAL statement_timeout = '10s'; SELECT pglogical.wait_for_subscription_sync_complete('test_bidirectional'); wait_for_subscription_sync_complete ------------------------------------- (1 row) COMMIT; \c :subscriber_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.basic_dml ( id serial primary key, other integer, data text, something interval ); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml'); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn 
----------------------- (1 row) \c :provider_dsn SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml'); replication_set_add_table --------------------------- t (1 row) -- check basic insert replication INSERT INTO basic_dml(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, something FROM basic_dml ORDER BY id; id | other | data | something ----+-------+------+------------------ 1 | 5 | foo | @ 1 min 2 | 4 | bar | @ 84 days 3 | 3 | baz | @ 2 years 1 hour 4 | 2 | qux | @ 8 mons 2 days 5 | 1 | | (5 rows) UPDATE basic_dml SET other = id, something = something - '10 seconds'::interval WHERE id < 3; UPDATE basic_dml SET other = id, something = something + '10 seconds'::interval WHERE id > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :provider_dsn SELECT id, other, data, something FROM basic_dml ORDER BY id; id | other | data | something ----+-------+------+------------------------- 1 | 1 | foo | @ 50 secs 2 | 2 | bar | @ 84 days -10 secs 3 | 3 | baz | @ 2 years 1 hour 4 | 4 | qux | @ 8 mons 2 days 10 secs 5 | 5 | | (5 rows) \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.basic_dml CASCADE; $$); NOTICE: drop cascades to table public.basic_dml membership in replication set default replicate_ddl_command ----------------------- t (1 row) SELECT pglogical.drop_subscription('test_bidirectional'); drop_subscription ------------------- 1 (1 row) SET client_min_messages = 'warning'; DROP EXTENSION IF EXISTS pglogical_origin; \c :subscriber_dsn \a SELECT slot_name FROM pg_replication_slots WHERE database = current_database(); slot_name (0 rows) SELECT 
count(*) FROM pg_stat_replication WHERE application_name = 'test_bidirectional'; count 0 (1 row) pglogical-REL2_4_1/expected/column_filter.out000066400000000000000000000217641415142317000213700ustar00rootroot00000000000000-- basic builtin datatypes SELECT * FROM pglogical_regress_variables() \gset -- create and populate table at provider \c :provider_dsn CREATE TABLE public.basic_dml ( id serial primary key, other integer, data text, something interval ); SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; nspname | relname | set_name ---------+-----------+---------- public | basic_dml | (1 row) INSERT INTO basic_dml(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); \c :subscriber_dsn -- create table on subscriber to receive replicated filtered data from provider -- there are some extra columns too, and we omit 'other' as a non-replicated -- table on upstream only. 
CREATE TABLE public.basic_dml ( id serial primary key, data text, something interval, subonly integer, subonly_def integer DEFAULT 99 ); SELECT nspname, relname, att_list, has_row_filter FROM pglogical.show_repset_table_info('basic_dml'::regclass, ARRAY['default']); nspname | relname | att_list | has_row_filter ---------+-----------+-----------------------------------------+---------------- public | basic_dml | {id,data,something,subonly,subonly_def} | f (1 row) SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; nspname | relname | set_name ---------+-----------+---------- public | basic_dml | (1 row) \c :provider_dsn -- Fails: the column filter list must include the key SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', synchronize_data := true, columns := '{data, something}'); ERROR: REPLICA IDENTITY columns must be replicated SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; nspname | relname | set_name ---------+-----------+---------- public | basic_dml | (1 row) -- Fails: the column filter list may not include cols that are not in the table SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', synchronize_data := true, columns := '{data, something, nosuchcol}'); ERROR: table public.basic_dml does not have column nosuchcol SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; nspname | relname | set_name ---------+-----------+---------- public | basic_dml | (1 row) -- At provider, add table to replication set, with filtered columns SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', synchronize_data := true, columns := '{id, data, something}'); replication_set_add_table --------------------------- t (1 row) SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; nspname | relname | set_name 
---------+-----------+---------- public | basic_dml | default (1 row) SELECT nspname, relname, att_list, has_row_filter FROM pglogical.show_repset_table_info('basic_dml'::regclass, ARRAY['default']); nspname | relname | att_list | has_row_filter ---------+-----------+---------------------+---------------- public | basic_dml | {id,data,something} | f (1 row) SELECT id, data, something FROM basic_dml ORDER BY id; id | data | something ----+------+------------------ 1 | foo | @ 1 min 2 | bar | @ 84 days 3 | baz | @ 2 years 1 hour 4 | qux | @ 8 mons 2 days 5 | | (5 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn BEGIN; SET LOCAL statement_timeout = '10s'; SELECT pglogical.wait_for_table_sync_complete('test_subscription', 'basic_dml'); wait_for_table_sync_complete ------------------------------ (1 row) COMMIT; SELECT nspname, relname, att_list, has_row_filter FROM pglogical.show_repset_table_info('basic_dml'::regclass, ARRAY['default']); nspname | relname | att_list | has_row_filter ---------+-----------+-----------------------------------------+---------------- public | basic_dml | {id,data,something,subonly,subonly_def} | f (1 row) SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; nspname | relname | set_name ---------+-----------+---------- public | basic_dml | (1 row) -- data should get replicated to subscriber SELECT id, data, something FROM basic_dml ORDER BY id; id | data | something ----+------+------------------ 1 | foo | @ 1 min 2 | bar | @ 84 days 3 | baz | @ 2 years 1 hour 4 | qux | @ 8 mons 2 days 5 | | (5 rows) \c :provider_dsn -- Adding a table that's already selectively replicated fails \set VERBOSITY terse SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', synchronize_data := true); ERROR: duplicate key value violates unique constraint "replication_set_table_pkey" \set VERBOSITY default SELECT 
nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; nspname | relname | set_name ---------+-----------+---------- public | basic_dml | default (1 row) -- So does trying to re-add to change the column set \set VERBOSITY terse SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', synchronize_data := true, columns := '{id, data}'); ERROR: duplicate key value violates unique constraint "replication_set_table_pkey" \set VERBOSITY default SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; nspname | relname | set_name ---------+-----------+---------- public | basic_dml | default (1 row) -- Shouldn't be able to drop a replicated col in a rel -- but due to RM#5916 you can BEGIN; ALTER TABLE public.basic_dml DROP COLUMN data; SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; nspname | relname | set_name ---------+-----------+---------- public | basic_dml | default (1 row) SELECT nspname, relname, att_list, has_row_filter FROM pglogical.show_repset_table_info('basic_dml'::regclass, ARRAY['default']); nspname | relname | att_list | has_row_filter ---------+-----------+----------------+---------------- public | basic_dml | {id,something} | f (1 row) ROLLBACK; -- Even when wrapped (RM#5916) BEGIN; SELECT pglogical.replicate_ddl_command($$ ALTER TABLE public.basic_dml DROP COLUMN data; $$); replicate_ddl_command ----------------------- t (1 row) SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; nspname | relname | set_name ---------+-----------+---------- public | basic_dml | default (1 row) SELECT nspname, relname, att_list, has_row_filter FROM pglogical.show_repset_table_info('basic_dml'::regclass, ARRAY['default']); nspname | relname | att_list | has_row_filter ---------+-----------+----------------+---------------- public | basic_dml | {id,something} | f (1 row) 
ROLLBACK; -- CASCADE should be allowed though BEGIN; ALTER TABLE public.basic_dml DROP COLUMN data CASCADE; SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; nspname | relname | set_name ---------+-----------+---------- public | basic_dml | default (1 row) SELECT nspname, relname, att_list, has_row_filter FROM pglogical.show_repset_table_info('basic_dml'::regclass, ARRAY['default']); nspname | relname | att_list | has_row_filter ---------+-----------+----------------+---------------- public | basic_dml | {id,something} | f (1 row) SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; nspname | relname | set_name ---------+-----------+---------- public | basic_dml | default (1 row) ROLLBACK; BEGIN; SELECT pglogical.replicate_ddl_command($$ ALTER TABLE public.basic_dml DROP COLUMN data CASCADE; $$); replicate_ddl_command ----------------------- t (1 row) SELECT nspname, relname, att_list, has_row_filter FROM pglogical.show_repset_table_info('basic_dml'::regclass, ARRAY['default']); nspname | relname | att_list | has_row_filter ---------+-----------+----------------+---------------- public | basic_dml | {id,something} | f (1 row) SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; nspname | relname | set_name ---------+-----------+---------- public | basic_dml | default (1 row) ROLLBACK; -- We can drop a non-replicated col. We must not replicate this DDL because in -- this case the downstream doesn't have the 'other' column and apply will -- fail. 
ALTER TABLE public.basic_dml DROP COLUMN other; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.basic_dml CASCADE; $$); NOTICE: drop cascades to table public.basic_dml membership in replication set default replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/conflict_secondary_unique.out000066400000000000000000000050001415142317000237450ustar00rootroot00000000000000--PRIMARY KEY SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn -- Test conflicts where a secondary unique constraint with a predicate exits, -- ensuring we don't generate false conflicts. SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.secondary_unique_pred ( a integer PRIMARY KEY, b integer NOT NULL, check_unique boolean NOT NULL ); CREATE UNIQUE INDEX ON public.secondary_unique_pred (b) WHERE (check_unique); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'secondary_unique_pred'); replication_set_add_table --------------------------- t (1 row) INSERT INTO secondary_unique_pred (a, b, check_unique) VALUES (1, 1, false); INSERT INTO secondary_unique_pred (a, b, check_unique) VALUES (2, 1, false); INSERT INTO secondary_unique_pred (a, b, check_unique) VALUES (3, 2, true); -- must fail INSERT INTO secondary_unique_pred (a, b, check_unique) VALUES (5, 2, true); ERROR: duplicate key value violates unique constraint "secondary_unique_pred_b_idx" DETAIL: Key (b)=(2) already exists. 
SELECT * FROM secondary_unique_pred ORDER BY a; a | b | check_unique ---+---+-------------- 1 | 1 | f 2 | 1 | f 3 | 2 | t (3 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM secondary_unique_pred ORDER BY a; a | b | check_unique ---+---+-------------- 1 | 1 | f 2 | 1 | f 3 | 2 | t (3 rows) \c :provider_dsn -- This line doesn't conflict on the provider. On the subscriber -- we must not detect a conflict on (b), since the existing local -- row matches (check_unique) but the new remote row doesn't. So -- this must get applied. INSERT INTO secondary_unique_pred (a, b, check_unique) VALUES (4, 2, false); SELECT * FROM secondary_unique_pred ORDER BY a; a | b | check_unique ---+---+-------------- 1 | 1 | f 2 | 1 | f 3 | 2 | t 4 | 2 | f (4 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM secondary_unique_pred ORDER BY a; a | b | check_unique ---+---+-------------- 1 | 1 | f 2 | 1 | f 3 | 2 | t 4 | 2 | f (4 rows) \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.secondary_unique_pred CASCADE; $$); NOTICE: drop cascades to table public.secondary_unique_pred membership in replication set default replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/copy.out000066400000000000000000000053031415142317000174670ustar00rootroot00000000000000--test COPY SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.x ( a serial primary key, b int, c text not null default 'stuff', d text, e text ); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'x'); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); 
wait_slot_confirm_lsn ----------------------- (1 row) COPY x (a, b, c, d, e) from stdin; COPY x (b, d) from stdin; COPY x (b, d) from stdin; COPY x (a, b, c, d, e) from stdin; SELECT * FROM x ORDER BY a; a | b | c | d | e -------+----+-------+---------+---- 1 | 1 | stuff | test_1 | 2 | 2 | stuff | test_2 | 3 | 3 | stuff | test_3 | 4 | 4 | stuff | test_4 | 5 | 5 | stuff | test_5 | 6 | 6 | stuff | test_6 | 7 | 7 | stuff | test_7 | 8 | 8 | stuff | test_8 | 9 | 9 | stuff | test_9 | 10 | 10 | stuff | test_10 | 11 | 11 | stuff | test_11 | 12 | 12 | stuff | test_12 | 13 | 13 | stuff | test_13 | 14 | 14 | stuff | test_14 | 15 | 15 | stuff | test_15 | 9999 | | \N | NN | 10000 | 21 | 31 | 41 | 51 10001 | 22 | 32 | 42 | 52 10002 | 23 | 33 | 43 | 53 10003 | 24 | 34 | 44 | 54 10004 | 25 | 35 | 45 | 55 10005 | 26 | 36 | 46 | 56 (22 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM x ORDER BY a; a | b | c | d | e -------+----+-------+---------+---- 1 | 1 | stuff | test_1 | 2 | 2 | stuff | test_2 | 3 | 3 | stuff | test_3 | 4 | 4 | stuff | test_4 | 5 | 5 | stuff | test_5 | 6 | 6 | stuff | test_6 | 7 | 7 | stuff | test_7 | 8 | 8 | stuff | test_8 | 9 | 9 | stuff | test_9 | 10 | 10 | stuff | test_10 | 11 | 11 | stuff | test_11 | 12 | 12 | stuff | test_12 | 13 | 13 | stuff | test_13 | 14 | 14 | stuff | test_14 | 15 | 15 | stuff | test_15 | 9999 | | \N | NN | 10000 | 21 | 31 | 41 | 51 10001 | 22 | 32 | 42 | 52 10002 | 23 | 33 | 43 | 53 10003 | 24 | 34 | 44 | 54 10004 | 25 | 35 | 45 | 55 10005 | 26 | 36 | 46 | 56 (22 rows) \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.x CASCADE; $$); NOTICE: drop cascades to table public.x membership in replication set default replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/drop.out000066400000000000000000000027741415142317000174720ustar00rootroot00000000000000SELECT * FROM 
pglogical_regress_variables() \gset \c :provider_dsn SELECT * FROM pglogical.drop_node(node_name := 'test_provider'); ERROR: cannot drop node "test_provider" because one or more replication slots for the node are still active HINT: drop the subscriptions connected to the node first SELECT plugin, slot_type, active FROM pg_replication_slots; plugin | slot_type | active ------------------+-----------+-------- pglogical_output | logical | t (1 row) SELECT count(*) FROM pg_stat_replication; count ------- 1 (1 row) \c :subscriber_dsn SELECT * FROM pglogical.drop_subscription('test_subscription'); drop_subscription ------------------- 1 (1 row) SELECT * FROM pglogical.drop_node(node_name := 'test_subscriber'); drop_node ----------- t (1 row) \c :provider_dsn SELECT * FROM pglogical.drop_node(node_name := 'test_provider'); drop_node ----------- t (1 row) \c :subscriber_dsn DROP OWNED BY nonsuper, super CASCADE; \c :provider_dsn DROP OWNED BY nonsuper, super CASCADE; \c :provider1_dsn DROP OWNED BY nonsuper, super CASCADE; \c :orig_provider_dsn DROP OWNED BY nonsuper, super CASCADE; \c :subscriber_dsn SET client_min_messages = 'warning'; DROP ROLE IF EXISTS nonsuper, super; \c :provider_dsn SET client_min_messages = 'warning'; DROP ROLE IF EXISTS nonsuper, super; \c :provider1_dsn SET client_min_messages = 'warning'; DROP ROLE IF EXISTS nonsuper, super; \c :orig_provider_dsn SET client_min_messages = 'warning'; DROP ROLE IF EXISTS nonsuper, super; pglogical-REL2_4_1/expected/extended.out000066400000000000000000001154761415142317000203320ustar00rootroot00000000000000-- complex datatype handling SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.tst_one_array ( a INTEGER PRIMARY KEY, b INTEGER[] ); CREATE TABLE public.tst_arrays ( a INTEGER[] PRIMARY KEY, b TEXT[], c FLOAT[], d INTERVAL[] ); CREATE TYPE public.tst_enum_t AS ENUM ('a', 'b', 'c', 'd', 'e'); CREATE TABLE public.tst_one_enum ( a 
INTEGER PRIMARY KEY, b public.tst_enum_t ); CREATE TABLE public.tst_enums ( a public.tst_enum_t PRIMARY KEY, b public.tst_enum_t[] ); CREATE TYPE public.tst_comp_basic_t AS (a FLOAT, b TEXT, c INTEGER); CREATE TYPE public.tst_comp_enum_t AS (a FLOAT, b public.tst_enum_t, c INTEGER); CREATE TYPE public.tst_comp_enum_array_t AS (a FLOAT, b public.tst_enum_t[], c INTEGER); CREATE TABLE public.tst_one_comp ( a INTEGER PRIMARY KEY, b public.tst_comp_basic_t ); CREATE TABLE public.tst_comps ( a public.tst_comp_basic_t PRIMARY KEY, b public.tst_comp_basic_t[] ); CREATE TABLE public.tst_comp_enum ( a INTEGER PRIMARY KEY, b public.tst_comp_enum_t ); CREATE TABLE public.tst_comp_enum_array ( a public.tst_comp_enum_t PRIMARY KEY, b public.tst_comp_enum_t[] ); CREATE TABLE public.tst_comp_one_enum_array ( a INTEGER PRIMARY KEY, b public.tst_comp_enum_array_t ); CREATE TABLE public.tst_comp_enum_what ( a public.tst_comp_enum_array_t PRIMARY KEY, b public.tst_comp_enum_array_t[] ); CREATE TYPE public.tst_comp_mix_t AS ( a public.tst_comp_basic_t, b public.tst_comp_basic_t[], c public.tst_enum_t, d public.tst_enum_t[] ); CREATE TABLE public.tst_comp_mix_array ( a public.tst_comp_mix_t PRIMARY KEY, b public.tst_comp_mix_t[] ); CREATE TABLE public.tst_range ( a INTEGER PRIMARY KEY, b int4range ); CREATE TABLE public.tst_range_array ( a INTEGER PRIMARY KEY, b TSTZRANGE, c int8range[] ); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_all_tables('default', '{public}'); replication_set_add_all_tables -------------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) -- test_tbl_one_array_col INSERT INTO tst_one_array (a, b) VALUES (1, '{1, 2, 3}'), (2, '{2, 3, 1}'), (3, '{3, 2, 1}'), (4, '{4, 3, 2}'), (5, '{5, NULL, 3}'); -- test_tbl_arrays INSERT INTO tst_arrays (a, b, c, d) VALUES ('{1, 2, 3}', '{"a", "b", "c"}', '{1.1, 2.2, 3.3}', '{"1 day", "2 
days", "3 days"}'), ('{2, 3, 1}', '{"b", "c", "a"}', '{2.2, 3.3, 1.1}', '{"2 minutes", "3 minutes", "1 minute"}'), ('{3, 1, 2}', '{"c", "a", "b"}', '{3.3, 1.1, 2.2}', '{"3 years", "1 year", "2 years"}'), ('{4, 1, 2}', '{"d", "a", "b"}', '{4.4, 1.1, 2.2}', '{"4 years", "1 year", "2 years"}'), ('{5, NULL, NULL}', '{"e", NULL, "b"}', '{5.5, 1.1, NULL}', '{"5 years", NULL, NULL}'); -- test_tbl_single_enum INSERT INTO tst_one_enum (a, b) VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd'), (5, NULL); -- test_tbl_enums INSERT INTO tst_enums (a, b) VALUES ('a', '{b, c}'), ('b', '{c, a}'), ('c', '{b, a}'), ('d', '{c, b}'), ('e', '{d, NULL}'); -- test_tbl_single_composites INSERT INTO tst_one_comp (a, b) VALUES (1, ROW(1.0, 'a', 1)), (2, ROW(2.0, 'b', 2)), (3, ROW(3.0, 'c', 3)), (4, ROW(4.0, 'd', 4)), (5, ROW(NULL, NULL, 5)); -- test_tbl_composites INSERT INTO tst_comps (a, b) VALUES (ROW(1.0, 'a', 1), ARRAY[ROW(1, 'a', 1)::tst_comp_basic_t]), (ROW(2.0, 'b', 2), ARRAY[ROW(2, 'b', 2)::tst_comp_basic_t]), (ROW(3.0, 'c', 3), ARRAY[ROW(3, 'c', 3)::tst_comp_basic_t]), (ROW(4.0, 'd', 4), ARRAY[ROW(4, 'd', 3)::tst_comp_basic_t]), (ROW(5.0, 'e', NULL), ARRAY[NULL, ROW(5, NULL, 5)::tst_comp_basic_t]); -- test_tbl_composite_with_enums INSERT INTO tst_comp_enum (a, b) VALUES (1, ROW(1.0, 'a', 1)), (2, ROW(2.0, 'b', 2)), (3, ROW(3.0, 'c', 3)), (4, ROW(4.0, 'd', 4)), (5, ROW(NULL, 'e', NULL)); -- test_tbl_composite_with_enums_array INSERT INTO tst_comp_enum_array (a, b) VALUES (ROW(1.0, 'a', 1), ARRAY[ROW(1, 'a', 1)::tst_comp_enum_t]), (ROW(2.0, 'b', 2), ARRAY[ROW(2, 'b', 2)::tst_comp_enum_t]), (ROW(3.0, 'c', 3), ARRAY[ROW(3, 'c', 3)::tst_comp_enum_t]), (ROW(4.0, 'd', 3), ARRAY[ROW(3, 'd', 3)::tst_comp_enum_t]), (ROW(5.0, 'e', 3), ARRAY[ROW(3, 'e', 3)::tst_comp_enum_t, NULL]); -- test_tbl_composite_with_single_enums_array_in_composite INSERT INTO tst_comp_one_enum_array (a, b) VALUES (1, ROW(1.0, '{a, b, c}', 1)), (2, ROW(2.0, '{a, b, c}', 2)), (3, ROW(3.0, '{a, b, c}', 3)), (4, ROW(4.0, '{c, 
b, d}', 4)), (5, ROW(5.0, '{NULL, e, NULL}', 5)); -- test_tbl_composite_with_enums_array_in_composite INSERT INTO tst_comp_enum_what (a, b) VALUES (ROW(1.0, '{a, b, c}', 1), ARRAY[ROW(1, '{a, b, c}', 1)::tst_comp_enum_array_t]), (ROW(2.0, '{b, c, a}', 2), ARRAY[ROW(2, '{b, c, a}', 1)::tst_comp_enum_array_t]), (ROW(3.0, '{c, a, b}', 1), ARRAY[ROW(3, '{c, a, b}', 1)::tst_comp_enum_array_t]), (ROW(4.0, '{c, b, d}', 4), ARRAY[ROW(4, '{c, b, d}', 4)::tst_comp_enum_array_t]), (ROW(5.0, '{c, NULL, b}', NULL), ARRAY[ROW(5, '{c, e, b}', 1)::tst_comp_enum_array_t]); -- test_tbl_mixed_composites INSERT INTO tst_comp_mix_array (a, b) VALUES (ROW( ROW(1,'a',1), ARRAY[ROW(1,'a',1)::tst_comp_basic_t, ROW(2,'b',2)::tst_comp_basic_t], 'a', '{a,b,NULL,c}'), ARRAY[ ROW( ROW(1,'a',1), ARRAY[ ROW(1,'a',1)::tst_comp_basic_t, ROW(2,'b',2)::tst_comp_basic_t, NULL ], 'a', '{a,b,c}' )::tst_comp_mix_t ] ); -- test_tbl_range INSERT INTO tst_range (a, b) VALUES (1, '[1, 10]'), (2, '[2, 20]'), (3, '[3, 30]'), (4, '[4, 40]'), (5, '[5, 50]'); -- test_tbl_range_array INSERT INTO tst_range_array (a, b, c) VALUES (1, tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz, 'infinity'), '{"[1,2]", "[10,20]"}'), (2, tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz - interval '2 days', 'Mon Aug 04 00:00:00 2014 CEST'::timestamptz), '{"[2,3]", "[20,30]"}'), (3, tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz - interval '3 days', 'Mon Aug 04 00:00:00 2014 CEST'::timestamptz), '{"[3,4]"}'), (4, tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz - interval '4 days', 'Mon Aug 04 00:00:00 2014 CEST'::timestamptz), '{"[4,5]", NULL, "[40,50]"}'), (5, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_one_array ORDER BY a; a | b ---+------------ 1 | {1,2,3} 2 | {2,3,1} 3 | {3,2,1} 4 | {4,3,2} 5 | {5,NULL,3} (5 rows) SELECT a, b, c, d FROM tst_arrays ORDER BY a; a | b | c | d 
---------------+------------+----------------+-------------------------------------- {1,2,3} | {a,b,c} | {1.1,2.2,3.3} | {"@ 1 day","@ 2 days","@ 3 days"} {2,3,1} | {b,c,a} | {2.2,3.3,1.1} | {"@ 2 mins","@ 3 mins","@ 1 min"} {3,1,2} | {c,a,b} | {3.3,1.1,2.2} | {"@ 3 years","@ 1 year","@ 2 years"} {4,1,2} | {d,a,b} | {4.4,1.1,2.2} | {"@ 4 years","@ 1 year","@ 2 years"} {5,NULL,NULL} | {e,NULL,b} | {5.5,1.1,NULL} | {"@ 5 years",NULL,NULL} (5 rows) SELECT a, b FROM tst_one_enum ORDER BY a; a | b ---+--- 1 | a 2 | b 3 | c 4 | d 5 | (5 rows) SELECT a, b FROM tst_enums ORDER BY a; a | b ---+---------- a | {b,c} b | {c,a} c | {b,a} d | {c,b} e | {d,NULL} (5 rows) SELECT a, b FROM tst_one_comp ORDER BY a; a | b ---+--------- 1 | (1,a,1) 2 | (2,b,2) 3 | (3,c,3) 4 | (4,d,4) 5 | (,,5) (5 rows) SELECT a, b FROM tst_comps ORDER BY a; a | b ---------+----------------- (1,a,1) | {"(1,a,1)"} (2,b,2) | {"(2,b,2)"} (3,c,3) | {"(3,c,3)"} (4,d,4) | {"(4,d,3)"} (5,e,) | {NULL,"(5,,5)"} (5 rows) SELECT a, b FROM tst_comp_enum ORDER BY a; a | b ---+--------- 1 | (1,a,1) 2 | (2,b,2) 3 | (3,c,3) 4 | (4,d,4) 5 | (,e,) (5 rows) SELECT a, b FROM tst_comp_enum_array ORDER BY a; a | b ---------+------------------ (1,a,1) | {"(1,a,1)"} (2,b,2) | {"(2,b,2)"} (3,c,3) | {"(3,c,3)"} (4,d,3) | {"(3,d,3)"} (5,e,3) | {"(3,e,3)",NULL} (5 rows) SELECT a, b FROM tst_comp_one_enum_array ORDER BY a; a | b ---+----------------------- 1 | (1,"{a,b,c}",1) 2 | (2,"{a,b,c}",2) 3 | (3,"{a,b,c}",3) 4 | (4,"{c,b,d}",4) 5 | (5,"{NULL,e,NULL}",5) (5 rows) SELECT a, b FROM tst_comp_enum_what ORDER BY a; a | b -------------------+----------------------- (1,"{a,b,c}",1) | {"(1,\"{a,b,c}\",1)"} (2,"{b,c,a}",2) | {"(2,\"{b,c,a}\",1)"} (3,"{c,a,b}",1) | {"(3,\"{c,a,b}\",1)"} (4,"{c,b,d}",4) | {"(4,\"{c,b,d}\",4)"} (5,"{c,NULL,b}",) | {"(5,\"{c,e,b}\",1)"} (5 rows) SELECT a, b FROM tst_comp_mix_array ORDER BY a; a | b 
----------------------------------------------------------+---------------------------------------------------------------------------- ("(1,a,1)","{""(1,a,1)"",""(2,b,2)""}",a,"{a,b,NULL,c}") | {"(\"(1,a,1)\",\"{\"\"(1,a,1)\"\",\"\"(2,b,2)\"\",NULL}\",a,\"{a,b,c}\")"} (1 row) SELECT a, b FROM tst_range ORDER BY a; a | b ---+-------- 1 | [1,11) 2 | [2,21) 3 | [3,31) 4 | [4,41) 5 | [5,51) (5 rows) SELECT a, b, c FROM tst_range_array ORDER BY a; a | b | c ---+-----------------------------------------------------------------+-------------------------- 1 | ["Sun Aug 03 15:00:00 2014 PDT",infinity) | {"[1,3)","[10,21)"} 2 | ["Fri Aug 01 15:00:00 2014 PDT","Sun Aug 03 15:00:00 2014 PDT") | {"[2,4)","[20,31)"} 3 | ["Thu Jul 31 15:00:00 2014 PDT","Sun Aug 03 15:00:00 2014 PDT") | {"[3,5)"} 4 | ["Wed Jul 30 15:00:00 2014 PDT","Sun Aug 03 15:00:00 2014 PDT") | {"[4,6)",NULL,"[40,51)"} 5 | | (5 rows) -- test_tbl_one_array_col \c :provider_dsn UPDATE tst_one_array SET b = '{4, 5, 6}' WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_one_array ORDER BY a; a | b ---+------------ 1 | {4,5,6} 2 | {2,3,1} 3 | {3,2,1} 4 | {4,3,2} 5 | {5,NULL,3} (5 rows) \c :provider_dsn UPDATE tst_one_array SET b = '{4, 5, 6, 1}' WHERE a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_one_array ORDER BY a; a | b ---+----------- 1 | {4,5,6} 2 | {2,3,1} 3 | {3,2,1} 4 | {4,5,6,1} 5 | {4,5,6,1} (5 rows) \c :provider_dsn DELETE FROM tst_one_array WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_one_array ORDER BY a; a | b ---+----------- 2 | {2,3,1} 3 | {3,2,1} 4 | {4,5,6,1} 5 | {4,5,6,1} (4 rows) \c :provider_dsn DELETE FROM tst_one_array WHERE b = '{2, 3, 1}'; SELECT 
pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_one_array ORDER BY a; a | b ---+----------- 3 | {3,2,1} 4 | {4,5,6,1} 5 | {4,5,6,1} (3 rows) \c :provider_dsn DELETE FROM tst_one_array WHERE 1 = ANY(b); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_one_array ORDER BY a; a | b ---+--- (0 rows) -- test_tbl_arrays \c :provider_dsn UPDATE tst_arrays SET b = '{"1a", "2b", "3c"}', c = '{1.0, 2.0, 3.0}', d = '{"1 day 1 second", "2 days 2 seconds", "3 days 3 second"}' WHERE a = '{1, 2, 3}'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b, c, d FROM tst_arrays ORDER BY a; a | b | c | d ---------------+------------+----------------+------------------------------------------------------- {1,2,3} | {1a,2b,3c} | {1,2,3} | {"@ 1 day 1 sec","@ 2 days 2 secs","@ 3 days 3 secs"} {2,3,1} | {b,c,a} | {2.2,3.3,1.1} | {"@ 2 mins","@ 3 mins","@ 1 min"} {3,1,2} | {c,a,b} | {3.3,1.1,2.2} | {"@ 3 years","@ 1 year","@ 2 years"} {4,1,2} | {d,a,b} | {4.4,1.1,2.2} | {"@ 4 years","@ 1 year","@ 2 years"} {5,NULL,NULL} | {e,NULL,b} | {5.5,1.1,NULL} | {"@ 5 years",NULL,NULL} (5 rows) \c :provider_dsn UPDATE tst_arrays SET b = '{"c", "d", "e"}', c = '{3.0, 4.0, 5.0}', d = '{"3 day 1 second", "4 days 2 seconds", "5 days 3 second"}' WHERE a[1] > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b, c, d FROM tst_arrays ORDER BY a; a | b | c | d ---------------+------------+---------------+-------------------------------------------------------- {1,2,3} | {1a,2b,3c} | {1,2,3} | {"@ 1 day 1 sec","@ 2 days 2 secs","@ 3 days 3 secs"} {2,3,1} | {b,c,a} | {2.2,3.3,1.1} | {"@ 2 mins","@ 3 mins","@ 1 min"} {3,1,2} | {c,a,b} | {3.3,1.1,2.2} | {"@ 3 
years","@ 1 year","@ 2 years"} {4,1,2} | {c,d,e} | {3,4,5} | {"@ 3 days 1 sec","@ 4 days 2 secs","@ 5 days 3 secs"} {5,NULL,NULL} | {c,d,e} | {3,4,5} | {"@ 3 days 1 sec","@ 4 days 2 secs","@ 5 days 3 secs"} (5 rows) \c :provider_dsn DELETE FROM tst_arrays WHERE a = '{1, 2, 3}'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b, c, d FROM tst_arrays ORDER BY a; a | b | c | d ---------------+---------+---------------+-------------------------------------------------------- {2,3,1} | {b,c,a} | {2.2,3.3,1.1} | {"@ 2 mins","@ 3 mins","@ 1 min"} {3,1,2} | {c,a,b} | {3.3,1.1,2.2} | {"@ 3 years","@ 1 year","@ 2 years"} {4,1,2} | {c,d,e} | {3,4,5} | {"@ 3 days 1 sec","@ 4 days 2 secs","@ 5 days 3 secs"} {5,NULL,NULL} | {c,d,e} | {3,4,5} | {"@ 3 days 1 sec","@ 4 days 2 secs","@ 5 days 3 secs"} (4 rows) \c :provider_dsn DELETE FROM tst_arrays WHERE a[1] = 2; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b, c, d FROM tst_arrays ORDER BY a; a | b | c | d ---------------+---------+---------------+-------------------------------------------------------- {3,1,2} | {c,a,b} | {3.3,1.1,2.2} | {"@ 3 years","@ 1 year","@ 2 years"} {4,1,2} | {c,d,e} | {3,4,5} | {"@ 3 days 1 sec","@ 4 days 2 secs","@ 5 days 3 secs"} {5,NULL,NULL} | {c,d,e} | {3,4,5} | {"@ 3 days 1 sec","@ 4 days 2 secs","@ 5 days 3 secs"} (3 rows) \c :provider_dsn DELETE FROM tst_arrays WHERE b[1] = 'c'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b, c, d FROM tst_arrays ORDER BY a; a | b | c | d ---+---+---+--- (0 rows) -- test_tbl_single_enum \c :provider_dsn UPDATE tst_one_enum SET b = 'c' WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_one_enum 
ORDER BY a; a | b ---+--- 1 | c 2 | b 3 | c 4 | d 5 | (5 rows) \c :provider_dsn UPDATE tst_one_enum SET b = NULL WHERE a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_one_enum ORDER BY a; a | b ---+--- 1 | c 2 | b 3 | c 4 | 5 | (5 rows) \c :provider_dsn DELETE FROM tst_one_enum WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_one_enum ORDER BY a; a | b ---+--- 2 | b 3 | c 4 | 5 | (4 rows) \c :provider_dsn DELETE FROM tst_one_enum WHERE b = 'b'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_one_enum ORDER BY a; a | b ---+--- 3 | c 4 | 5 | (3 rows) -- test_tbl_enums \c :provider_dsn UPDATE tst_enums SET b = '{e, NULL}' WHERE a = 'a'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_enums; a | b ---+---------- b | {c,a} c | {b,a} d | {c,b} e | {d,NULL} a | {e,NULL} (5 rows) \c :provider_dsn UPDATE tst_enums SET b = '{e, d}' WHERE a > 'c'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_enums; a | b ---+---------- b | {c,a} c | {b,a} a | {e,NULL} d | {e,d} e | {e,d} (5 rows) \c :provider_dsn DELETE FROM tst_enums WHERE a = 'a'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_enums; a | b ---+------- b | {c,a} c | {b,a} d | {e,d} e | {e,d} (4 rows) \c :provider_dsn DELETE FROM tst_enums WHERE 'c' = ANY(b); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_enums; a | b ---+------- 
c | {b,a} d | {e,d} e | {e,d} (3 rows) \c :provider_dsn DELETE FROM tst_enums WHERE b[1] = 'b'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_enums; a | b ---+------- d | {e,d} e | {e,d} (2 rows) -- test_tbl_single_composites \c :provider_dsn UPDATE tst_one_comp SET b = ROW(1.0, 'A', 1) WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b from tst_one_comp ORDER BY a; a | b ---+--------- 1 | (1,A,1) 2 | (2,b,2) 3 | (3,c,3) 4 | (4,d,4) 5 | (,,5) (5 rows) \c :provider_dsn UPDATE tst_one_comp SET b = ROW(NULL, 'x', -1) WHERE a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b from tst_one_comp ORDER BY a; a | b ---+--------- 1 | (1,A,1) 2 | (2,b,2) 3 | (3,c,3) 4 | (,x,-1) 5 | (,x,-1) (5 rows) \c :provider_dsn DELETE FROM tst_one_comp WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b from tst_one_comp ORDER BY a; a | b ---+--------- 2 | (2,b,2) 3 | (3,c,3) 4 | (,x,-1) 5 | (,x,-1) (4 rows) \c :provider_dsn DELETE FROM tst_one_comp WHERE (b).a = 2.0; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b from tst_one_comp ORDER BY a; a | b ---+--------- 3 | (3,c,3) 4 | (,x,-1) 5 | (,x,-1) (3 rows) -- test_tbl_composites \c :provider_dsn UPDATE tst_comps SET b = ARRAY[ROW(9, 'x', -1)::tst_comp_basic_t] WHERE (a).a = 1.0; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b from tst_comps ORDER BY a; a | b ---------+----------------- (1,a,1) | {"(9,x,-1)"} (2,b,2) | {"(2,b,2)"} (3,c,3) | {"(3,c,3)"} (4,d,4) | {"(4,d,3)"} (5,e,) 
| {NULL,"(5,,5)"} (5 rows) \c :provider_dsn UPDATE tst_comps SET b = ARRAY[NULL, ROW(9, 'x', NULL)::tst_comp_basic_t] WHERE (a).a > 3.9; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b from tst_comps ORDER BY a; a | b ---------+----------------- (1,a,1) | {"(9,x,-1)"} (2,b,2) | {"(2,b,2)"} (3,c,3) | {"(3,c,3)"} (4,d,4) | {NULL,"(9,x,)"} (5,e,) | {NULL,"(9,x,)"} (5 rows) \c :provider_dsn DELETE FROM tst_comps WHERE (a).b = 'a'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_comps ORDER BY a; a | b ---------+----------------- (2,b,2) | {"(2,b,2)"} (3,c,3) | {"(3,c,3)"} (4,d,4) | {NULL,"(9,x,)"} (5,e,) | {NULL,"(9,x,)"} (4 rows) \c :provider_dsn DELETE FROM tst_comps WHERE (b[1]).a = 2.0; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_comps ORDER BY a; a | b ---------+----------------- (3,c,3) | {"(3,c,3)"} (4,d,4) | {NULL,"(9,x,)"} (5,e,) | {NULL,"(9,x,)"} (3 rows) \c :provider_dsn DELETE FROM tst_comps WHERE ROW(3, 'c', 3)::tst_comp_basic_t = ANY(b); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_comps ORDER BY a; a | b ---------+----------------- (4,d,4) | {NULL,"(9,x,)"} (5,e,) | {NULL,"(9,x,)"} (2 rows) -- test_tbl_composite_with_enums \c :provider_dsn UPDATE tst_comp_enum SET b = ROW(1.0, NULL, NULL) WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b from tst_comp_enum ORDER BY a; a | b ---+--------- 1 | (1,,) 2 | (2,b,2) 3 | (3,c,3) 4 | (4,d,4) 5 | (,e,) (5 rows) \c :provider_dsn UPDATE tst_comp_enum SET b = ROW(4.0, 'd', 44) WHERE a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, 
NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b from tst_comp_enum ORDER BY a; a | b ---+---------- 1 | (1,,) 2 | (2,b,2) 3 | (3,c,3) 4 | (4,d,44) 5 | (4,d,44) (5 rows) \c :provider_dsn DELETE FROM tst_comp_enum WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_comp_enum ORDER BY a; a | b ---+---------- 2 | (2,b,2) 3 | (3,c,3) 4 | (4,d,44) 5 | (4,d,44) (4 rows) \c :provider_dsn DELETE FROM tst_comp_enum WHERE (b).a = 2.0; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_comp_enum ORDER BY a; a | b ---+---------- 3 | (3,c,3) 4 | (4,d,44) 5 | (4,d,44) (3 rows) -- test_tbl_composite_with_enums_array \c :provider_dsn UPDATE tst_comp_enum_array SET b = ARRAY[NULL, ROW(3, 'd', 3)::tst_comp_enum_t] WHERE a = ROW(1.0, 'a', 1)::tst_comp_enum_t; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b from tst_comp_enum_array ORDER BY a; a | b ---------+------------------ (1,a,1) | {NULL,"(3,d,3)"} (2,b,2) | {"(2,b,2)"} (3,c,3) | {"(3,c,3)"} (4,d,3) | {"(3,d,3)"} (5,e,3) | {"(3,e,3)",NULL} (5 rows) \c :provider_dsn UPDATE tst_comp_enum_array SET b = ARRAY[ROW(1, 'a', 1)::tst_comp_enum_t, ROW(2, 'b', 2)::tst_comp_enum_t] WHERE (a).a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b from tst_comp_enum_array ORDER BY a; a | b ---------+----------------------- (1,a,1) | {NULL,"(3,d,3)"} (2,b,2) | {"(2,b,2)"} (3,c,3) | {"(3,c,3)"} (4,d,3) | {"(1,a,1)","(2,b,2)"} (5,e,3) | {"(1,a,1)","(2,b,2)"} (5 rows) \c :provider_dsn DELETE FROM tst_comp_enum_array WHERE a = ROW(1.0, 'a', 1)::tst_comp_enum_t; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); 
wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_comp_enum_array ORDER BY a; a | b ---------+----------------------- (2,b,2) | {"(2,b,2)"} (3,c,3) | {"(3,c,3)"} (4,d,3) | {"(1,a,1)","(2,b,2)"} (5,e,3) | {"(1,a,1)","(2,b,2)"} (4 rows) \c :provider_dsn DELETE FROM tst_comp_enum_array WHERE (b[1]).b = 'b'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_comp_enum_array ORDER BY a; a | b ---------+----------------------- (3,c,3) | {"(3,c,3)"} (4,d,3) | {"(1,a,1)","(2,b,2)"} (5,e,3) | {"(1,a,1)","(2,b,2)"} (3 rows) \c :provider_dsn DELETE FROM tst_comp_enum_array WHERE ROW(3, 'c', 3)::tst_comp_enum_t = ANY(b); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_comp_enum_array ORDER BY a; a | b ---------+----------------------- (4,d,3) | {"(1,a,1)","(2,b,2)"} (5,e,3) | {"(1,a,1)","(2,b,2)"} (2 rows) -- test_tbl_composite_with_single_enums_array_in_composite \c :provider_dsn UPDATE tst_comp_one_enum_array SET b = ROW(1.0, '{a, e, c}', NULL) WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b from tst_comp_one_enum_array ORDER BY a; a | b ---+----------------------- 1 | (1,"{a,e,c}",) 2 | (2,"{a,b,c}",2) 3 | (3,"{a,b,c}",3) 4 | (4,"{c,b,d}",4) 5 | (5,"{NULL,e,NULL}",5) (5 rows) \c :provider_dsn UPDATE tst_comp_one_enum_array SET b = ROW(4.0, '{c, b, d}', 4) WHERE a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b from tst_comp_one_enum_array ORDER BY a; a | b ---+----------------- 1 | (1,"{a,e,c}",) 2 | (2,"{a,b,c}",2) 3 | (3,"{a,b,c}",3) 4 | (4,"{c,b,d}",4) 5 | (4,"{c,b,d}",4) (5 rows) \c :provider_dsn DELETE FROM tst_comp_one_enum_array WHERE a = 1; 
SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_comp_one_enum_array ORDER BY a; a | b ---+----------------- 2 | (2,"{a,b,c}",2) 3 | (3,"{a,b,c}",3) 4 | (4,"{c,b,d}",4) 5 | (4,"{c,b,d}",4) (4 rows) \c :provider_dsn DELETE FROM tst_comp_one_enum_array WHERE (b).c = 2; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_comp_one_enum_array ORDER BY a; a | b ---+----------------- 3 | (3,"{a,b,c}",3) 4 | (4,"{c,b,d}",4) 5 | (4,"{c,b,d}",4) (3 rows) \c :provider_dsn DELETE FROM tst_comp_one_enum_array WHERE 'a' = ANY((b).b); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_comp_one_enum_array ORDER BY a; a | b ---+----------------- 4 | (4,"{c,b,d}",4) 5 | (4,"{c,b,d}",4) (2 rows) -- test_tbl_composite_with_enums_array_in_composite \c :provider_dsn UPDATE tst_comp_enum_what SET b = ARRAY[NULL, ROW(1, '{a, b, c}', 1)::tst_comp_enum_array_t, ROW(NULL, '{a, e, c}', 2)::tst_comp_enum_array_t] WHERE (a).a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b from tst_comp_enum_what ORDER BY a; a | b -------------------+----------------------------------------------- (1,"{a,b,c}",1) | {NULL,"(1,\"{a,b,c}\",1)","(,\"{a,e,c}\",2)"} (2,"{b,c,a}",2) | {"(2,\"{b,c,a}\",1)"} (3,"{c,a,b}",1) | {"(3,\"{c,a,b}\",1)"} (4,"{c,b,d}",4) | {"(4,\"{c,b,d}\",4)"} (5,"{c,NULL,b}",) | {"(5,\"{c,e,b}\",1)"} (5 rows) \c :provider_dsn UPDATE tst_comp_enum_what SET b = ARRAY[ROW(5, '{a, b, c}', 5)::tst_comp_enum_array_t] WHERE (a).a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b from tst_comp_enum_what ORDER BY a; a | b 
-------------------+----------------------------------------------- (1,"{a,b,c}",1) | {NULL,"(1,\"{a,b,c}\",1)","(,\"{a,e,c}\",2)"} (2,"{b,c,a}",2) | {"(2,\"{b,c,a}\",1)"} (3,"{c,a,b}",1) | {"(3,\"{c,a,b}\",1)"} (4,"{c,b,d}",4) | {"(5,\"{a,b,c}\",5)"} (5,"{c,NULL,b}",) | {"(5,\"{a,b,c}\",5)"} (5 rows) \c :provider_dsn DELETE FROM tst_comp_enum_what WHERE (a).a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_comp_enum_what ORDER BY a; a | b -------------------+----------------------- (2,"{b,c,a}",2) | {"(2,\"{b,c,a}\",1)"} (3,"{c,a,b}",1) | {"(3,\"{c,a,b}\",1)"} (4,"{c,b,d}",4) | {"(5,\"{a,b,c}\",5)"} (5,"{c,NULL,b}",) | {"(5,\"{a,b,c}\",5)"} (4 rows) \c :provider_dsn DELETE FROM tst_comp_enum_what WHERE (b[1]).a = 2; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_comp_enum_what ORDER BY a; a | b -------------------+----------------------- (3,"{c,a,b}",1) | {"(3,\"{c,a,b}\",1)"} (4,"{c,b,d}",4) | {"(5,\"{a,b,c}\",5)"} (5,"{c,NULL,b}",) | {"(5,\"{a,b,c}\",5)"} (3 rows) \c :provider_dsn DELETE FROM tst_comp_enum_what WHERE (b[1]).b = '{c, a, b}'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_comp_enum_what ORDER BY a; a | b -------------------+----------------------- (4,"{c,b,d}",4) | {"(5,\"{a,b,c}\",5)"} (5,"{c,NULL,b}",) | {"(5,\"{a,b,c}\",5)"} (2 rows) -- test_tbl_mixed_composites \c :provider_dsn UPDATE tst_comp_mix_array SET b[2] = NULL WHERE ((a).a).a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_comp_mix_array ORDER BY a; a | b 
----------------------------------------------------------+--------------------------------------------------------------------------------- ("(1,a,1)","{""(1,a,1)"",""(2,b,2)""}",a,"{a,b,NULL,c}") | {"(\"(1,a,1)\",\"{\"\"(1,a,1)\"\",\"\"(2,b,2)\"\",NULL}\",a,\"{a,b,c}\")",NULL} (1 row) \c :provider_dsn DELETE FROM tst_comp_mix_array WHERE ((a).a).a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_comp_mix_array ORDER BY a; a | b ---+--- (0 rows) -- test_tbl_range \c :provider_dsn UPDATE tst_range SET b = '[100, 1000]' WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_range ORDER BY a; a | b ---+------------ 1 | [100,1001) 2 | [2,21) 3 | [3,31) 4 | [4,41) 5 | [5,51) (5 rows) \c :provider_dsn UPDATE tst_range SET b = '(1, 90)' WHERE a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_range ORDER BY a; a | b ---+------------ 1 | [100,1001) 2 | [2,21) 3 | [3,31) 4 | [2,90) 5 | [2,90) (5 rows) \c :provider_dsn DELETE FROM tst_range WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_range ORDER BY a; a | b ---+-------- 2 | [2,21) 3 | [3,31) 4 | [2,90) 5 | [2,90) (4 rows) \c :provider_dsn DELETE FROM tst_range WHERE b = '[2, 20]'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM tst_range ORDER BY a; a | b ---+-------- 3 | [3,31) 4 | [2,90) 5 | [2,90) (3 rows) \c :provider_dsn DELETE FROM tst_range WHERE '[10,20]' && b; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b FROM 
tst_range ORDER BY a; a | b ---+--- (0 rows) -- test_tbl_range_array \c :provider_dsn UPDATE tst_range_array SET c = '{"[100, 1000]"}' WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b, c FROM tst_range_array ORDER BY a; a | b | c ---+-----------------------------------------------------------------+-------------------------- 1 | ["Sun Aug 03 15:00:00 2014 PDT",infinity) | {"[100,1001)"} 2 | ["Fri Aug 01 15:00:00 2014 PDT","Sun Aug 03 15:00:00 2014 PDT") | {"[2,4)","[20,31)"} 3 | ["Thu Jul 31 15:00:00 2014 PDT","Sun Aug 03 15:00:00 2014 PDT") | {"[3,5)"} 4 | ["Wed Jul 30 15:00:00 2014 PDT","Sun Aug 03 15:00:00 2014 PDT") | {"[4,6)",NULL,"[40,51)"} 5 | | (5 rows) \c :provider_dsn UPDATE tst_range_array SET b = tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz, 'infinity'), c = '{NULL, "[11,9999999]"}' WHERE a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b, c FROM tst_range_array ORDER BY a; a | b | c ---+-----------------------------------------------------------------+------------------------ 1 | ["Sun Aug 03 15:00:00 2014 PDT",infinity) | {"[100,1001)"} 2 | ["Fri Aug 01 15:00:00 2014 PDT","Sun Aug 03 15:00:00 2014 PDT") | {"[2,4)","[20,31)"} 3 | ["Thu Jul 31 15:00:00 2014 PDT","Sun Aug 03 15:00:00 2014 PDT") | {"[3,5)"} 4 | ["Sun Aug 03 15:00:00 2014 PDT",infinity) | {NULL,"[11,10000000)"} 5 | ["Sun Aug 03 15:00:00 2014 PDT",infinity) | {NULL,"[11,10000000)"} (5 rows) \c :provider_dsn DELETE FROM tst_range_array WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b, c FROM tst_range_array ORDER BY a; a | b | c ---+-----------------------------------------------------------------+------------------------ 2 | ["Fri Aug 01 15:00:00 2014 PDT","Sun Aug 03 15:00:00 2014 PDT") | 
{"[2,4)","[20,31)"} 3 | ["Thu Jul 31 15:00:00 2014 PDT","Sun Aug 03 15:00:00 2014 PDT") | {"[3,5)"} 4 | ["Sun Aug 03 15:00:00 2014 PDT",infinity) | {NULL,"[11,10000000)"} 5 | ["Sun Aug 03 15:00:00 2014 PDT",infinity) | {NULL,"[11,10000000)"} (4 rows) \c :provider_dsn DELETE FROM tst_range_array WHERE b = tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz - interval '2 days', 'Mon Aug 04 00:00:00 2014 CEST'::timestamptz); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b, c FROM tst_range_array ORDER BY a; a | b | c ---+-----------------------------------------------------------------+------------------------ 3 | ["Thu Jul 31 15:00:00 2014 PDT","Sun Aug 03 15:00:00 2014 PDT") | {"[3,5)"} 4 | ["Sun Aug 03 15:00:00 2014 PDT",infinity) | {NULL,"[11,10000000)"} 5 | ["Sun Aug 03 15:00:00 2014 PDT",infinity) | {NULL,"[11,10000000)"} (3 rows) \c :provider_dsn DELETE FROM tst_range_array WHERE tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz, 'Mon Aug 05 00:00:00 2014 CEST'::timestamptz) && b; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT a, b, c FROM tst_range_array ORDER BY a; a | b | c ---+-----------------------------------------------------------------+----------- 3 | ["Thu Jul 31 15:00:00 2014 PDT","Sun Aug 03 15:00:00 2014 PDT") | {"[3,5)"} (1 row) \c :provider_dsn -- Verify that swap_relation_files(...) 
breaks replication -- as invoked by CLUSTER, VACUUM FULL, or REFRESH MATERIALIZED VIEW VACUUM FULL tst_one_array; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.tst_one_array CASCADE; DROP TABLE public.tst_arrays CASCADE; DROP TABLE public.tst_one_enum CASCADE; DROP TABLE public.tst_enums CASCADE; DROP TABLE public.tst_one_comp CASCADE; DROP TABLE public.tst_comps CASCADE; DROP TABLE public.tst_comp_enum CASCADE; DROP TABLE public.tst_comp_enum_array CASCADE; DROP TABLE public.tst_comp_one_enum_array CASCADE; DROP TABLE public.tst_comp_enum_what CASCADE; DROP TABLE public.tst_comp_mix_array CASCADE; DROP TABLE public.tst_range CASCADE; DROP TABLE public.tst_range_array CASCADE; DROP TYPE public.tst_comp_mix_t; DROP TYPE public.tst_comp_enum_array_t; DROP TYPE public.tst_comp_enum_t; DROP TYPE public.tst_comp_basic_t; DROP TYPE public.tst_enum_t; $$); NOTICE: drop cascades to table public.tst_one_array membership in replication set default NOTICE: drop cascades to table public.tst_arrays membership in replication set default NOTICE: drop cascades to table public.tst_one_enum membership in replication set default NOTICE: drop cascades to table public.tst_enums membership in replication set default NOTICE: drop cascades to table public.tst_one_comp membership in replication set default NOTICE: drop cascades to table public.tst_comps membership in replication set default NOTICE: drop cascades to table public.tst_comp_enum membership in replication set default NOTICE: drop cascades to table public.tst_comp_enum_array membership in replication set default NOTICE: drop cascades to table public.tst_comp_one_enum_array membership in replication set default NOTICE: drop cascades to table public.tst_comp_enum_what membership in replication set default NOTICE: drop cascades to table public.tst_comp_mix_array membership in 
replication set default NOTICE: drop cascades to table public.tst_range membership in replication set default NOTICE: drop cascades to table public.tst_range_array membership in replication set default replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/foreign_key.out000066400000000000000000000042721415142317000210220ustar00rootroot00000000000000--FOREIGN KEY SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.f1k_products ( product_no integer PRIMARY KEY, product_id integer, name text, price numeric ); CREATE TABLE public.f1k_orders ( order_id integer, product_no integer REFERENCES public.f1k_products (product_no), quantity integer ); --pass $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'f1k_products'); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default_insert_only', 'f1k_orders'); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) INSERT into public.f1k_products VALUES (1, 1, 'product1', 1.20); INSERT into public.f1k_products VALUES (2, 2, 'product2', 2.40); INSERT into public.f1k_orders VALUES (300, 1, 4); INSERT into public.f1k_orders VALUES (22, 2, 14); INSERT into public.f1k_orders VALUES (23, 2, 24); INSERT into public.f1k_orders VALUES (24, 2, 40); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM public.f1k_products; product_no | product_id | name | price ------------+------------+----------+------- 1 | 1 | product1 | 1.20 2 | 2 | product2 | 2.40 (2 rows) SELECT * FROM public.f1k_orders; order_id | product_no | quantity ----------+------------+---------- 300 | 1 | 4 22 | 2 | 14 23 | 2 | 24 24 | 2 | 
40 (4 rows) \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.f1k_orders CASCADE; DROP TABLE public.f1k_products CASCADE; $$); NOTICE: drop cascades to table public.f1k_orders membership in replication set default_insert_only NOTICE: drop cascades to table public.f1k_products membership in replication set default replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/functions.out000066400000000000000000000271051415142317000205310ustar00rootroot00000000000000--Immutable, volatile functions and nextval in DEFAULT clause SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE FUNCTION public.add(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; CREATE TABLE public.funct2( a integer, b integer, c integer DEFAULT public.add(10,12 ) ) ; $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default_insert_only', 'public.funct2'); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) INSERT INTO public.funct2(a,b) VALUES (1,2);--c should be 22 INSERT INTO public.funct2(a,b,c) VALUES (3,4,5);-- c should be 5 SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * from public.funct2; a | b | c ---+---+---- 1 | 2 | 22 3 | 4 | 5 (2 rows) \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ create or replace function public.get_curr_century() returns double precision as 'SELECT EXTRACT(CENTURY FROM NOW());' language sql volatile; CREATE TABLE public.funct5( a integer, b integer, c double precision DEFAULT public.get_curr_century() ); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM 
pglogical.replication_set_add_all_tables('default_insert_only', '{public}'); replication_set_add_all_tables -------------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) INSERT INTO public.funct5(a,b) VALUES (1,2);--c should be e.g. 21 for 2015 INSERT INTO public.funct5(a,b,c) VALUES (3,4,20);-- c should be 20 SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * from public.funct5; a | b | c ---+---+---- 1 | 2 | 21 3 | 4 | 20 (2 rows) --nextval check \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE SEQUENCE public.INSERT_SEQ; CREATE TABLE public.funct ( a integer, b INT DEFAULT nextval('public.insert_seq') ); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_all_tables('default_insert_only', '{public}'); replication_set_add_all_tables -------------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) INSERT INTO public.funct (a) VALUES (1); INSERT INTO public.funct (a) VALUES (2); INSERT INTO public.funct (a) VALUES (3); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM public.funct; a | b ---+--- 1 | 1 2 | 2 3 | 3 (3 rows) \c :provider_dsn BEGIN; COMMIT;--empty transaction SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM public.funct; a | b ---+--- 1 | 1 2 | 2 3 | 3 (3 rows) -- test replication where the destination table has extra (nullable) columns that are not in the origin table \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.nullcheck_tbl( id integer PRIMARY KEY, id1 integer, name text ) ; $$); replicate_ddl_command 
----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'nullcheck_tbl'); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) INSERT INTO public.nullcheck_tbl(id,id1,name) VALUES (1,1,'name1'); INSERT INTO public.nullcheck_tbl(id,id1,name) VALUES (2,2,'name2'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM public.nullcheck_tbl; id | id1 | name ----+-----+------- 1 | 1 | name1 2 | 2 | name2 (2 rows) ALTER TABLE public.nullcheck_tbl ADD COLUMN name1 text; SELECT * FROM public.nullcheck_tbl; id | id1 | name | name1 ----+-----+-------+------- 1 | 1 | name1 | 2 | 2 | name2 | (2 rows) \c :provider_dsn INSERT INTO public.nullcheck_tbl(id,id1,name) VALUES (3,3,'name3'); INSERT INTO public.nullcheck_tbl(id,id1,name) VALUES (4,4,'name4'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM public.nullcheck_tbl; id | id1 | name | name1 ----+-----+-------+------- 1 | 1 | name1 | 2 | 2 | name2 | 3 | 3 | name3 | 4 | 4 | name4 | (4 rows) \c :provider_dsn UPDATE public.nullcheck_tbl SET name='name31' where id = 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn INSERT INTO public.nullcheck_tbl(id,id1,name) VALUES (6,6,'name6'); SELECT * FROM public.nullcheck_tbl; id | id1 | name | name1 ----+-----+--------+------- 1 | 1 | name1 | 2 | 2 | name2 | 4 | 4 | name4 | 3 | 3 | name31 | 6 | 6 | name6 | (5 rows) \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.not_nullcheck_tbl( id integer PRIMARY KEY, id1 integer, name text ) ; $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 
'not_nullcheck_tbl'); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn ALTER TABLE public.not_nullcheck_tbl ADD COLUMN id2 integer not null; \c :provider_dsn SELECT quote_literal(pg_current_xlog_location()) as curr_lsn \gset INSERT INTO public.not_nullcheck_tbl(id,id1,name) VALUES (1,1,'name1'); INSERT INTO public.not_nullcheck_tbl(id,id1,name) VALUES (2,2,'name2'); SELECT pglogical.wait_slot_confirm_lsn(NULL, :curr_lsn); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM public.not_nullcheck_tbl; id | id1 | name | id2 ----+-----+------+----- (0 rows) INSERT INTO public.not_nullcheck_tbl(id,id1,name) VALUES (3,3,'name3'); ERROR: null value in column "id2" violates not-null constraint DETAIL: Failing row contains (3, 3, name3, null). SELECT * FROM public.not_nullcheck_tbl; id | id1 | name | id2 ----+-----+------+----- (0 rows) SELECT pglogical.alter_subscription_disable('test_subscription', true); alter_subscription_disable ---------------------------- t (1 row) \c :provider_dsn DO $$ BEGIN FOR i IN 1..100 LOOP IF (SELECT count(1) FROM pg_replication_slots WHERE active = false) THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; SELECT data::json->'action' as action, CASE WHEN data::json->>'action' IN ('I', 'D', 'U') THEN data END as data FROM pg_logical_slot_get_changes((SELECT slot_name FROM pg_replication_slots), NULL, 1, 'min_proto_version', '1', 'max_proto_version', '1', 'startup_params_format', '1', 'proto_format', 'json', 'pglogical.replication_set_names', 'default'); action | data --------+----------------------------------------------------------------------------------------------------- "S" | "B" | "I" | {"action":"I","relation":["public","not_nullcheck_tbl"],"newtuple":{"id":1,"id1":1,"name":"name1"}} "C" | (4 rows) SELECT data::json->'action' as action, CASE WHEN 
data::json->>'action' IN ('I', 'D', 'U') THEN data END as data FROM pg_logical_slot_get_changes((SELECT slot_name FROM pg_replication_slots), NULL, 1, 'min_proto_version', '1', 'max_proto_version', '1', 'startup_params_format', '1', 'proto_format', 'json', 'pglogical.replication_set_names', 'default'); action | data --------+----------------------------------------------------------------------------------------------------- "S" | "B" | "I" | {"action":"I","relation":["public","not_nullcheck_tbl"],"newtuple":{"id":2,"id1":2,"name":"name2"}} "C" | (4 rows) \c :subscriber_dsn SELECT pglogical.alter_subscription_enable('test_subscription', true); alter_subscription_enable --------------------------- t (1 row) ALTER TABLE public.not_nullcheck_tbl ALTER COLUMN id2 SET default 99; \c :provider_dsn DO $$ BEGIN FOR i IN 1..100 LOOP IF (SELECT count(1) FROM pg_replication_slots WHERE active = true) THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; INSERT INTO public.not_nullcheck_tbl(id,id1,name) VALUES (4,4,'name4'); -- id2 will be 99 on subscriber ALTER TABLE public.not_nullcheck_tbl ADD COLUMN id2 integer not null default 0; INSERT INTO public.not_nullcheck_tbl(id,id1,name) VALUES (5,5,'name5'); -- id2 will be 0 on both SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM public.not_nullcheck_tbl; id | id1 | name | id2 ----+-----+-------+----- 4 | 4 | name4 | 99 5 | 5 | name5 | 0 (2 rows) \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE FUNCTION public.some_prime_numbers() RETURNS SETOF integer LANGUAGE sql IMMUTABLE STRICT LEAKPROOF AS $_$ VALUES (2), (3), (5), (7), (11), (13), (17), (19), (23), (29), (31), (37), (41), (43), (47), (53), (59), (61), (67), (71), (73), (79), (83), (89), (97) $_$; CREATE FUNCTION public.is_prime_lt_100(integer) RETURNS boolean LANGUAGE sql IMMUTABLE STRICT LEAKPROOF AS $_$ SELECT EXISTS (SELECT FROM public.some_prime_numbers() 
s(p) WHERE p = $1) $_$; CREATE DOMAIN public.prime AS integer CONSTRAINT prime_check CHECK(public.is_prime_lt_100(VALUE)); CREATE TABLE public.prime_tbl ( num public.prime NOT NULL, PRIMARY KEY(num) ); INSERT INTO public.prime_tbl (num) VALUES(17), (31), (79); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'public.prime_tbl'); replication_set_add_table --------------------------- t (1 row) DELETE FROM public.prime_tbl WHERE num = 31; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT num FROM public.prime_tbl; num ----- 17 79 (2 rows) \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.funct CASCADE; DROP SEQUENCE public.INSERT_SEQ; DROP TABLE public.funct2 CASCADE; DROP TABLE public.funct5 CASCADE; DROP FUNCTION public.get_curr_century(); DROP FUNCTION public.add(integer, integer); DROP TABLE public.nullcheck_tbl CASCADE; DROP TABLE public.not_nullcheck_tbl CASCADE; DROP TABLE public.prime_tbl CASCADE; DROP DOMAIN public.prime; DROP FUNCTION public.is_prime_lt_100(integer); DROP FUNCTION public.some_prime_numbers(); $$); NOTICE: drop cascades to table public.funct membership in replication set default_insert_only NOTICE: drop cascades to table public.funct2 membership in replication set default_insert_only NOTICE: drop cascades to table public.funct5 membership in replication set default_insert_only NOTICE: drop cascades to table public.nullcheck_tbl membership in replication set default NOTICE: drop cascades to table public.not_nullcheck_tbl membership in replication set default NOTICE: drop cascades to table public.prime_tbl membership in replication set default replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/functions_1.out000066400000000000000000000271451415142317000207550ustar00rootroot00000000000000--Immutable, volatile 
functions and nextval in DEFAULT clause SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE FUNCTION public.add(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; CREATE TABLE public.funct2( a integer, b integer, c integer DEFAULT public.add(10,12 ) ) ; $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default_insert_only', 'public.funct2'); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) INSERT INTO public.funct2(a,b) VALUES (1,2);--c should be 22 INSERT INTO public.funct2(a,b,c) VALUES (3,4,5);-- c should be 5 SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * from public.funct2; a | b | c ---+---+---- 1 | 2 | 22 3 | 4 | 5 (2 rows) \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ create or replace function public.get_curr_century() returns double precision as 'SELECT EXTRACT(CENTURY FROM NOW());' language sql volatile; CREATE TABLE public.funct5( a integer, b integer, c double precision DEFAULT public.get_curr_century() ); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_all_tables('default_insert_only', '{public}'); replication_set_add_all_tables -------------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) INSERT INTO public.funct5(a,b) VALUES (1,2);--c should be e.g. 
21 for 2015 INSERT INTO public.funct5(a,b,c) VALUES (3,4,20);-- c should be 20 SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * from public.funct5; a | b | c ---+---+---- 1 | 2 | 21 3 | 4 | 20 (2 rows) --nextval check \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE SEQUENCE public.INSERT_SEQ; CREATE TABLE public.funct ( a integer, b INT DEFAULT nextval('public.insert_seq') ); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_all_tables('default_insert_only', '{public}'); replication_set_add_all_tables -------------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) INSERT INTO public.funct (a) VALUES (1); INSERT INTO public.funct (a) VALUES (2); INSERT INTO public.funct (a) VALUES (3); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM public.funct; a | b ---+--- 1 | 1 2 | 2 3 | 3 (3 rows) \c :provider_dsn BEGIN; COMMIT;--empty transaction SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM public.funct; a | b ---+--- 1 | 1 2 | 2 3 | 3 (3 rows) -- test replication where the destination table has extra (nullable) columns that are not in the origin table \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.nullcheck_tbl( id integer PRIMARY KEY, id1 integer, name text ) ; $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'nullcheck_tbl'); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) INSERT INTO public.nullcheck_tbl(id,id1,name) VALUES 
(1,1,'name1'); INSERT INTO public.nullcheck_tbl(id,id1,name) VALUES (2,2,'name2'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM public.nullcheck_tbl; id | id1 | name ----+-----+------- 1 | 1 | name1 2 | 2 | name2 (2 rows) ALTER TABLE public.nullcheck_tbl ADD COLUMN name1 text; SELECT * FROM public.nullcheck_tbl; id | id1 | name | name1 ----+-----+-------+------- 1 | 1 | name1 | 2 | 2 | name2 | (2 rows) \c :provider_dsn INSERT INTO public.nullcheck_tbl(id,id1,name) VALUES (3,3,'name3'); INSERT INTO public.nullcheck_tbl(id,id1,name) VALUES (4,4,'name4'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM public.nullcheck_tbl; id | id1 | name | name1 ----+-----+-------+------- 1 | 1 | name1 | 2 | 2 | name2 | 3 | 3 | name3 | 4 | 4 | name4 | (4 rows) \c :provider_dsn UPDATE public.nullcheck_tbl SET name='name31' where id = 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn INSERT INTO public.nullcheck_tbl(id,id1,name) VALUES (6,6,'name6'); SELECT * FROM public.nullcheck_tbl; id | id1 | name | name1 ----+-----+--------+------- 1 | 1 | name1 | 2 | 2 | name2 | 4 | 4 | name4 | 3 | 3 | name31 | 6 | 6 | name6 | (5 rows) \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.not_nullcheck_tbl( id integer PRIMARY KEY, id1 integer, name text ) ; $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'not_nullcheck_tbl'); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn ALTER TABLE public.not_nullcheck_tbl ADD COLUMN id2 integer not null; \c :provider_dsn SELECT 
quote_literal(pg_current_xlog_location()) as curr_lsn \gset INSERT INTO public.not_nullcheck_tbl(id,id1,name) VALUES (1,1,'name1'); INSERT INTO public.not_nullcheck_tbl(id,id1,name) VALUES (2,2,'name2'); SELECT pglogical.wait_slot_confirm_lsn(NULL, :curr_lsn); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM public.not_nullcheck_tbl; id | id1 | name | id2 ----+-----+------+----- (0 rows) INSERT INTO public.not_nullcheck_tbl(id,id1,name) VALUES (3,3,'name3'); ERROR: null value in column "id2" of relation "not_nullcheck_tbl" violates not-null constraint DETAIL: Failing row contains (3, 3, name3, null). SELECT * FROM public.not_nullcheck_tbl; id | id1 | name | id2 ----+-----+------+----- (0 rows) SELECT pglogical.alter_subscription_disable('test_subscription', true); alter_subscription_disable ---------------------------- t (1 row) \c :provider_dsn DO $$ BEGIN FOR i IN 1..100 LOOP IF (SELECT count(1) FROM pg_replication_slots WHERE active = false) THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; SELECT data::json->'action' as action, CASE WHEN data::json->>'action' IN ('I', 'D', 'U') THEN data END as data FROM pg_logical_slot_get_changes((SELECT slot_name FROM pg_replication_slots), NULL, 1, 'min_proto_version', '1', 'max_proto_version', '1', 'startup_params_format', '1', 'proto_format', 'json', 'pglogical.replication_set_names', 'default'); action | data --------+----------------------------------------------------------------------------------------------------- "S" | "B" | "I" | {"action":"I","relation":["public","not_nullcheck_tbl"],"newtuple":{"id":1,"id1":1,"name":"name1"}} "C" | (4 rows) SELECT data::json->'action' as action, CASE WHEN data::json->>'action' IN ('I', 'D', 'U') THEN data END as data FROM pg_logical_slot_get_changes((SELECT slot_name FROM pg_replication_slots), NULL, 1, 'min_proto_version', '1', 'max_proto_version', '1', 'startup_params_format', '1', 'proto_format', 'json', 
'pglogical.replication_set_names', 'default'); action | data --------+----------------------------------------------------------------------------------------------------- "S" | "B" | "I" | {"action":"I","relation":["public","not_nullcheck_tbl"],"newtuple":{"id":2,"id1":2,"name":"name2"}} "C" | (4 rows) \c :subscriber_dsn SELECT pglogical.alter_subscription_enable('test_subscription', true); alter_subscription_enable --------------------------- t (1 row) ALTER TABLE public.not_nullcheck_tbl ALTER COLUMN id2 SET default 99; \c :provider_dsn DO $$ BEGIN FOR i IN 1..100 LOOP IF (SELECT count(1) FROM pg_replication_slots WHERE active = true) THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; INSERT INTO public.not_nullcheck_tbl(id,id1,name) VALUES (4,4,'name4'); -- id2 will be 99 on subscriber ALTER TABLE public.not_nullcheck_tbl ADD COLUMN id2 integer not null default 0; INSERT INTO public.not_nullcheck_tbl(id,id1,name) VALUES (5,5,'name5'); -- id2 will be 0 on both SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM public.not_nullcheck_tbl; id | id1 | name | id2 ----+-----+-------+----- 4 | 4 | name4 | 99 5 | 5 | name5 | 0 (2 rows) \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE FUNCTION public.some_prime_numbers() RETURNS SETOF integer LANGUAGE sql IMMUTABLE STRICT LEAKPROOF AS $_$ VALUES (2), (3), (5), (7), (11), (13), (17), (19), (23), (29), (31), (37), (41), (43), (47), (53), (59), (61), (67), (71), (73), (79), (83), (89), (97) $_$; CREATE FUNCTION public.is_prime_lt_100(integer) RETURNS boolean LANGUAGE sql IMMUTABLE STRICT LEAKPROOF AS $_$ SELECT EXISTS (SELECT FROM public.some_prime_numbers() s(p) WHERE p = $1) $_$; CREATE DOMAIN public.prime AS integer CONSTRAINT prime_check CHECK(public.is_prime_lt_100(VALUE)); CREATE TABLE public.prime_tbl ( num public.prime NOT NULL, PRIMARY KEY(num) ); INSERT INTO public.prime_tbl (num) VALUES(17), (31), 
(79); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'public.prime_tbl'); replication_set_add_table --------------------------- t (1 row) DELETE FROM public.prime_tbl WHERE num = 31; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT num FROM public.prime_tbl; num ----- 17 79 (2 rows) \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.funct CASCADE; DROP SEQUENCE public.INSERT_SEQ; DROP TABLE public.funct2 CASCADE; DROP TABLE public.funct5 CASCADE; DROP FUNCTION public.get_curr_century(); DROP FUNCTION public.add(integer, integer); DROP TABLE public.nullcheck_tbl CASCADE; DROP TABLE public.not_nullcheck_tbl CASCADE; DROP TABLE public.prime_tbl CASCADE; DROP DOMAIN public.prime; DROP FUNCTION public.is_prime_lt_100(integer); DROP FUNCTION public.some_prime_numbers(); $$); NOTICE: drop cascades to table public.funct membership in replication set default_insert_only NOTICE: drop cascades to table public.funct2 membership in replication set default_insert_only NOTICE: drop cascades to table public.funct5 membership in replication set default_insert_only NOTICE: drop cascades to table public.nullcheck_tbl membership in replication set default NOTICE: drop cascades to table public.not_nullcheck_tbl membership in replication set default NOTICE: drop cascades to table public.prime_tbl membership in replication set default replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/huge_tx.out000066400000000000000000000050621415142317000201620ustar00rootroot00000000000000-- test huge transactions SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn -- lots of small rows replication with DDL outside transaction SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.a_huge ( id integer primary key, id1 integer, data text default 
'data', data1 text default 'data1' ); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'a_huge'); replication_set_add_table ----------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) BEGIN; INSERT INTO public.a_huge VALUES (generate_series(1, 20000000), generate_series(1, 20000000)); COMMIT; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT count(*) FROM a_huge; count ---------- 20000000 (1 row) \dtS+ a_huge; List of relations Schema | Name | Type | Owner | Size | Description --------+--------+-------+----------+--------+------------- public | a_huge | table | postgres | 996 MB | (1 row) \c :provider_dsn -- lots of small rows replication with DDL within transaction BEGIN; SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.b_huge ( id integer primary key, id1 integer, data text default 'data', data1 text default 'data1' ); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'b_huge'); replication_set_add_table ----------------------- t (1 row) INSERT INTO public.b_huge VALUES (generate_series(1,20000000), generate_series(1,20000000)); COMMIT; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT count(*) FROM b_huge; count ---------- 20000000 (1 row) \dtS+ b_huge; List of relations Schema | Name | Type | Owner | Size | Description --------+--------+-------+----------+--------+------------- public | b_huge | table | postgres | 996 MB | (1 row) \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.a_huge CASCADE; DROP TABLE public.b_huge CASCADE; $$); NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object 
replicate_ddl_command ----------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) pglogical-REL2_4_1/expected/huge_tx_100k_tables.out000066400000000000000000000506051415142317000222520ustar00rootroot00000000000000-- test huge transactions -- Set 'max_locks_per_transaction' to 10000 to run test SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn -- medium number of rows in many different tables (100k): replication with DDL outside transaction create or replace function create_many_tables(int, int) returns void language plpgsql as $$ DECLARE i int; cr_command varchar; BEGIN FOR i IN $1 .. $2 LOOP cr_command := 'SELECT pglogical.replicate_ddl_command('' CREATE TABLE public.HUGE' || i || ' ( id integer primary key, id1 integer, data text default ''''data'''', data1 text default ''''data1'''' ); '')'; EXECUTE cr_command; END LOOP; END; $$; -- write multile version of this statement create or replace function add_many_tables_to_replication_set(int, int) returns void language plpgsql as $$ DECLARE i int; cr_command varchar; BEGIN FOR i IN $1 .. $2 LOOP cr_command := 'SELECT * FROM pglogical.replication_set_add_table( ''default'', ''HUGE' || i || ''' );'; EXECUTE cr_command; END LOOP; END; $$; create or replace function insert_into_many_tables(int, int) returns void language plpgsql as $$ DECLARE i int; cr_command varchar; BEGIN FOR i IN $1 .. $2 LOOP cr_command := 'INSERT INTO public.HUGE' || i || ' VALUES (generate_series(1, 200), generate_series(1, 200))'; EXECUTE cr_command; END LOOP; END; $$; create or replace function drop_many_tables(int, int) returns void language plpgsql as $$ DECLARE i int; cr_command varchar; BEGIN FOR i IN $1 .. 
$2 LOOP cr_command := 'SELECT pglogical.replicate_ddl_command('' DROP TABLE public.HUGE' || i ||' CASCADE; '')'; EXECUTE cr_command; END LOOP; END; $$; SELECT * FROM create_many_tables(1,100000); create_many_tables -------------------- (1 row) SELECT * FROM add_many_tables_to_replication_set(1,100000); add_many_tables_to_replication_set --------------------------- (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) BEGIN; SELECT * FROM insert_into_many_tables(1,100000); insert_into_many_tables ------------------------- (1 row) COMMIT; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT count(*) FROM public.HUGE2; count -------- 200 (1 row) \dtS+ public.HUGE2; List of relations Schema | Name | Type | Owner | Size | Description --------+-------+-------+----------+---------+------------- public | huge2 | table | postgres | 10 kB | (1 row) \c :provider_dsn \set VERBOSITY terse SELECT * FROM drop_many_tables(1,100000); NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: 
drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: 
drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: 
drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: 
drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object drop_many_tables ------------------ (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) -- medium number of rows in many different tables (100k): replication with DDL inside transaction BEGIN; SELECT * FROM create_many_tables(1,100000); create_many_tables -------------------- (1 row) SELECT * FROM add_many_tables_to_replication_set(1,100000); add_many_tables_to_replication_set --------------------------- (1 row) SELECT * FROM insert_into_many_tables(1,100000); insert_into_many_tables ------------------------- (1 row) COMMIT; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT count(*) FROM public.HUGE2; count -------- 200 (1 row) \dtS+ public.HUGE2; List of relations Schema | Name | Type | Owner | Size | Description --------+-------+-------+----------+---------+------------- 
public | huge2 | table | postgres | 10 kB | (1 row) \c :provider_dsn \set VERBOSITY terse SELECT * FROM drop_many_tables(1,100000); NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 
other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 
other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 
other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 
other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object drop_many_tables ------------------ (1 row) DROP function create_many_tables(int, int); DROP function add_many_tables_to_replication_set(int, int); DROP function insert_into_many_tables(int, int); DROP function drop_many_tables(int, int); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) pglogical-REL2_4_1/expected/huge_tx_many_tables.out000066400000000000000000000504521415142317000225430ustar00rootroot00000000000000-- test huge transactions SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn -- medium number of rows in many different tables: replication with DDL outside transaction create or replace function create_many_tables(int, int) returns void language plpgsql as $$ DECLARE i int; cr_command varchar; BEGIN FOR i IN $1 .. $2 LOOP cr_command := 'SELECT pglogical.replicate_ddl_command('' CREATE TABLE public.HUGE' || i || ' ( id integer primary key, id1 integer, data text default ''''data'''', data1 text default ''''data1'''' ); '')'; EXECUTE cr_command; END LOOP; END; $$; --write multiple version of this. create or replace function add_many_tables_to_replication_set(int, int) returns void language plpgsql as $$ DECLARE i int; cr_command varchar; BEGIN FOR i IN $1 .. $2 LOOP cr_command := 'SELECT * FROM pglogical.replication_set_add_table( ''default'', ''HUGE' || i || ''' );'; EXECUTE cr_command; END LOOP; END; $$; create or replace function insert_into_many_tables(int, int) returns void language plpgsql as $$ DECLARE i int; cr_command varchar; BEGIN FOR i IN $1 .. 
$2 LOOP cr_command := 'INSERT INTO public.HUGE' || i || ' VALUES (generate_series(1, 100000), generate_series(1, 100000))'; EXECUTE cr_command; END LOOP; END; $$; create or replace function drop_many_tables(int, int) returns void language plpgsql as $$ DECLARE i int; cr_command varchar; BEGIN FOR i IN $1 .. $2 LOOP cr_command := 'SELECT pglogical.replicate_ddl_command('' DROP TABLE public.HUGE' || i ||' CASCADE; '')'; EXECUTE cr_command; END LOOP; END; $$; SELECT * FROM create_many_tables(1,200); create_many_tables -------------------- (1 row) SELECT * FROM add_many_tables_to_replication_set(1,200); add_many_tables_to_replication_set --------------------------- (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) BEGIN; SELECT * FROM insert_into_many_tables(1,200); insert_into_many_tables ------------------------- (1 row) COMMIT; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT count(*) FROM public.HUGE2; count -------- 100000 (1 row) \dtS+ public.HUGE2; List of relations Schema | Name | Type | Owner | Size | Description --------+-------+-------+----------+---------+------------- public | huge2 | table | postgres | 5128 kB | (1 row) \c :provider_dsn \set VERBOSITY terse SELECT * FROM drop_many_tables(1,200); NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 
other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 
other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 
other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 
other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object drop_many_tables ------------------ (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) -- medium number of rows in many different tables: replication with DDL inside transaction BEGIN; SELECT * FROM create_many_tables(1,200); create_many_tables -------------------- (1 row) SELECT * FROM add_many_tables_to_replication_set(1,200); add_many_tables_to_replication_set --------------------------- (1 row) SELECT * FROM insert_into_many_tables(1,200); insert_into_many_tables ------------------------- (1 row) COMMIT; SELECT pglogical.wait_slot_confirm_lsn(NULL, 
NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT count(*) FROM public.HUGE2; count -------- 100000 (1 row) \dtS+ public.HUGE2; List of relations Schema | Name | Type | Owner | Size | Description --------+-------+-------+----------+---------+------------- public | huge2 | table | postgres | 5128 kB | (1 row) \c :provider_dsn \set VERBOSITY terse SELECT * FROM drop_many_tables(1,200); NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop 
cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop 
cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop 
cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop 
cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object NOTICE: drop cascades to 1 other object drop_many_tables ------------------ (1 row) DROP function create_many_tables(int, int); DROP function add_many_tables_to_replication_set(int, int); DROP function insert_into_many_tables(int, int); DROP function drop_many_tables(int, int); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) pglogical-REL2_4_1/expected/infofuncs.out000066400000000000000000000021271415142317000205100ustar00rootroot00000000000000DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') = 904 THEN CREATE EXTENSION IF NOT EXISTS pglogical_origin; END IF; END;$$; CREATE EXTENSION pglogical; SELECT pglogical.pglogical_max_proto_version(); pglogical_max_proto_version ----------------------------- 1 (1 row) SELECT pglogical.pglogical_min_proto_version(); pglogical_min_proto_version ----------------------------- 1 (1 row) -- test extension version SELECT pglogical.pglogical_version() = extversion FROM pg_extension WHERE extname = 'pglogical'; ?column? ---------- t (1 row) DROP EXTENSION pglogical; -- test upgrades DO $$ BEGIN IF version() ~ 'Postgres-XL' THEN CREATE EXTENSION IF NOT EXISTS pglogical; ELSE CREATE EXTENSION IF NOT EXISTS pglogical VERSION '1.0.0'; END IF; END; $$; ALTER EXTENSION pglogical UPDATE; SELECT pglogical.pglogical_version() = extversion FROM pg_extension WHERE extname = 'pglogical'; ?column? 
---------- t (1 row) DROP EXTENSION pglogical; pglogical-REL2_4_1/expected/init.out000066400000000000000000000100061415142317000174540ustar00rootroot00000000000000-- This should be done with pg_regress's --create-role option -- but it's blocked by bug 37906 SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SET client_min_messages = 'warning'; DROP USER IF EXISTS nonsuper; DROP USER IF EXISTS super; CREATE USER nonsuper WITH replication; CREATE USER super SUPERUSER; \c :subscriber_dsn SET client_min_messages = 'warning'; DROP USER IF EXISTS nonsuper; DROP USER IF EXISTS super; CREATE USER nonsuper WITH replication; CREATE USER super SUPERUSER; -- Can't because of bug 37906 --GRANT ALL ON DATABASE regress TO nonsuper; --GRANT ALL ON DATABASE regress TO nonsuper; \c :provider_dsn GRANT ALL ON SCHEMA public TO nonsuper; DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') >= 1000 THEN CREATE OR REPLACE FUNCTION public.pg_current_xlog_location() RETURNS pg_lsn LANGUAGE SQL AS 'SELECT pg_current_wal_lsn()'; ALTER FUNCTION public.pg_current_xlog_location() OWNER TO super; END IF; END; $$; \c :subscriber_dsn GRANT ALL ON SCHEMA public TO nonsuper; SELECT E'\'' || current_database() || E'\'' AS subdb; subdb ------------ 'postgres' (1 row) \gset \c :provider_dsn SET client_min_messages = 'warning'; DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') = 904 THEN CREATE EXTENSION IF NOT EXISTS pglogical_origin; END IF; END;$$; DO $$ BEGIN IF version() ~ 'Postgres-XL' THEN CREATE EXTENSION IF NOT EXISTS pglogical; ELSE CREATE EXTENSION IF NOT EXISTS pglogical VERSION '1.0.0'; END IF; END; $$; ALTER EXTENSION pglogical UPDATE; \dx pglogical List of installed extensions Name | Version | Schema | Description -----------+---------+-----------+-------------------------------- pglogical | 2.4.1 | pglogical | PostgreSQL Logical Replication (1 row) SELECT * FROM 
pglogical.create_node(node_name := 'test_provider', dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=super'); create_node ------------- 2689511696 (1 row) \c :subscriber_dsn SET client_min_messages = 'warning'; DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') = 904 THEN CREATE EXTENSION IF NOT EXISTS pglogical_origin; END IF; END;$$; CREATE EXTENSION IF NOT EXISTS pglogical; SELECT * FROM pglogical.create_node(node_name := 'test_subscriber', dsn := (SELECT subscriber_dsn FROM pglogical_regress_variables()) || ' user=super'); create_node ------------- 1755434425 (1 row) BEGIN; SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription', provider_dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=super', synchronize_structure := true, forward_origins := '{}'); create_subscription --------------------- 3848008564 (1 row) /* * Remove the function we added in preseed because otherwise the restore of * schema will fail. We do this in same transaction as create_subscription() * because the subscription process will only start on commit. */ DROP FUNCTION IF EXISTS public.pglogical_regress_variables(); COMMIT; BEGIN; SET LOCAL statement_timeout = '30s'; SELECT pglogical.wait_for_subscription_sync_complete('test_subscription'); wait_for_subscription_sync_complete ------------------------------------- (1 row) COMMIT; SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status IN ('y', 'r') FROM pglogical.local_sync_status ORDER BY 2,3,4; sync_kind | sync_subid | sync_nspname | sync_relname | ?column? 
-----------+------------+--------------+--------------+---------- f | 3848008564 | | | t (1 row) -- Make sure we see the slot and active connection \c :provider_dsn SELECT plugin, slot_type, active FROM pg_replication_slots; plugin | slot_type | active ------------------+-----------+-------- pglogical_output | logical | t (1 row) SELECT count(*) FROM pg_stat_replication; count ------- 1 (1 row) pglogical-REL2_4_1/expected/init_fail.out000066400000000000000000000066321415142317000204610ustar00rootroot00000000000000SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SET client_min_messages = 'warning'; DROP ROLE IF EXISTS nonreplica; CREATE USER nonreplica; DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') = 904 THEN CREATE EXTENSION IF NOT EXISTS pglogical_origin; END IF; END;$$; CREATE EXTENSION IF NOT EXISTS pglogical; GRANT ALL ON SCHEMA pglogical TO nonreplica; GRANT ALL ON ALL TABLES IN SCHEMA pglogical TO nonreplica; \c :subscriber_dsn SET client_min_messages = 'warning'; \set VERBOSITY terse DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') = 904 THEN CREATE EXTENSION IF NOT EXISTS pglogical_origin; END IF; END;$$; DO $$ BEGIN IF version() ~ 'Postgres-XL' THEN CREATE EXTENSION IF NOT EXISTS pglogical; ELSE CREATE EXTENSION IF NOT EXISTS pglogical VERSION '1.0.0'; END IF; END; $$; ALTER EXTENSION pglogical UPDATE; -- fail (local node not existing) SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription', provider_dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=nonreplica', forward_origins := '{}'); ERROR: local pglogical node not found -- succeed SELECT * FROM pglogical.create_node(node_name := 'test_subscriber', dsn := (SELECT subscriber_dsn FROM pglogical_regress_variables()) || ' user=nonreplica'); create_node ------------- 1755434425 (1 row) -- fail (can't connect to remote) DO $$ BEGIN SELECT * 
FROM pglogical.create_subscription( subscription_name := 'test_subscription', provider_dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=nonexisting', forward_origins := '{}'); EXCEPTION WHEN OTHERS THEN RAISE EXCEPTION '%:%', split_part(SQLERRM, ':', 1), (regexp_matches(SQLERRM, '^.*( FATAL:.*role.*)$'))[1]; END; $$; ERROR: could not connect to the postgresql server: FATAL: role "nonexisting" does not exist -- fail (remote node not existing) SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription', provider_dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=nonreplica', forward_origins := '{}'); ERROR: could not fetch remote node info: ERROR: local pglogical node not found \c :provider_dsn -- succeed SELECT * FROM pglogical.create_node(node_name := 'test_provider', dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=nonreplica'); create_node ------------- 2689511696 (1 row) \c :subscriber_dsn \set VERBOSITY terse -- fail (can't connect with replication connection to remote) DO $$ BEGIN SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription', provider_dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=nonreplica', forward_origins := '{}'); EXCEPTION WHEN OTHERS THEN RAISE EXCEPTION '%', split_part(SQLERRM, ':', 1); END; $$; ERROR: could not connect to the postgresql server in replication mode -- cleanup SELECT * FROM pglogical.drop_node('test_subscriber'); drop_node ----------- t (1 row) DROP EXTENSION pglogical; \c :provider_dsn SELECT * FROM pglogical.drop_node('test_provider'); drop_node ----------- t (1 row) SET client_min_messages = 'warning'; DROP OWNED BY nonreplica; DROP ROLE IF EXISTS nonreplica; DROP EXTENSION pglogical; pglogical-REL2_4_1/expected/interfaces.out000066400000000000000000000053671415142317000206520ustar00rootroot00000000000000SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn 
CREATE USER super2 SUPERUSER; \c :subscriber_dsn SELECT * FROM pglogical.alter_node_add_interface('test_provider', 'super2', (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=super2'); alter_node_add_interface -------------------------- 3319308158 (1 row) SELECT * FROM pglogical.alter_subscription_interface('test_subscription', 'super2'); alter_subscription_interface ------------------------------ t (1 row) DO $$ BEGIN FOR i IN 1..100 LOOP IF EXISTS (SELECT 1 FROM pglogical.show_subscription_status() WHERE status != 'down') THEN EXIT; END IF; PERFORM pg_sleep(0.1); END LOOP; END;$$; SELECT pg_sleep(0.1); pg_sleep ---------- (1 row) SELECT subscription_name, status, provider_node, replication_sets, forward_origins FROM pglogical.show_subscription_status(); subscription_name | status | provider_node | replication_sets | forward_origins -------------------+-------------+---------------+---------------------------------------+----------------- test_subscription | replicating | test_provider | {default,default_insert_only,ddl_sql} | (1 row) \c :provider_dsn SELECT plugin, slot_type, active FROM pg_replication_slots; plugin | slot_type | active ------------------+-----------+-------- pglogical_output | logical | t (1 row) SELECT usename FROM pg_stat_replication WHERE application_name = 'test_subscription'; usename --------- super2 (1 row) \c :subscriber_dsn SELECT * FROM pglogical.alter_subscription_interface('test_subscription', 'test_provider'); alter_subscription_interface ------------------------------ t (1 row) DO $$ BEGIN FOR i IN 1..100 LOOP IF EXISTS (SELECT 1 FROM pglogical.show_subscription_status() WHERE status != 'down') THEN EXIT; END IF; PERFORM pg_sleep(0.1); END LOOP; END;$$; SELECT pg_sleep(0.1); pg_sleep ---------- (1 row) SELECT subscription_name, status, provider_node, replication_sets, forward_origins FROM pglogical.show_subscription_status(); subscription_name | status | provider_node | replication_sets | forward_origins 
-------------------+-------------+---------------+---------------------------------------+----------------- test_subscription | replicating | test_provider | {default,default_insert_only,ddl_sql} | (1 row) \c :provider_dsn DROP USER super2; SELECT plugin, slot_type, active FROM pg_replication_slots; plugin | slot_type | active ------------------+-----------+-------- pglogical_output | logical | t (1 row) SELECT usename FROM pg_stat_replication WHERE application_name = 'test_subscription'; usename --------- super (1 row) pglogical-REL2_4_1/expected/matview.out000066400000000000000000000054671415142317000202040ustar00rootroot00000000000000/* First test whether a table's replication set can be properly manipulated */ SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.test_tbl(id serial primary key, data text); CREATE MATERIALIZED VIEW public.test_mv AS (SELECT * FROM public.test_tbl); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_all_tables('default', '{public}'); replication_set_add_all_tables -------------------------------- t (1 row) INSERT INTO test_tbl VALUES (1, 'a'); REFRESH MATERIALIZED VIEW test_mv; INSERT INTO test_tbl VALUES (2, 'b'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) SELECT * FROM test_tbl ORDER BY id; id | data ----+------ 1 | a 2 | b (2 rows) SELECT * FROM test_mv ORDER BY id; id | data ----+------ 1 | a (1 row) \c :subscriber_dsn SELECT * FROM test_tbl ORDER BY id; id | data ----+------ 1 | a 2 | b (2 rows) SELECT * FROM test_mv ORDER BY id; id | data ----+------ (0 rows) \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE UNIQUE INDEX ON public.test_mv(id); $$); replicate_ddl_command ----------------------- t (1 row) INSERT INTO test_tbl VALUES (3, 'c'); REFRESH MATERIALIZED VIEW CONCURRENTLY test_mv; SELECT 
pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) INSERT INTO test_tbl VALUES (4, 'd'); SELECT pglogical.replicate_ddl_command($$ REFRESH MATERIALIZED VIEW public.test_mv; $$); replicate_ddl_command ----------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) INSERT INTO test_tbl VALUES (5, 'e'); SELECT pglogical.replicate_ddl_command($$ REFRESH MATERIALIZED VIEW CONCURRENTLY public.test_mv; $$); replicate_ddl_command ----------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) SELECT * FROM test_tbl ORDER BY id; id | data ----+------ 1 | a 2 | b 3 | c 4 | d 5 | e (5 rows) SELECT * FROM test_mv ORDER BY id; id | data ----+------ 1 | a 2 | b 3 | c 4 | d 5 | e (5 rows) \c :subscriber_dsn SELECT * FROM test_tbl ORDER BY id; id | data ----+------ 1 | a 2 | b 3 | c 4 | d 5 | e (5 rows) SELECT * FROM test_mv ORDER BY id; id | data ----+------ 1 | a 2 | b 3 | c 4 | d 5 | e (5 rows) \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.test_tbl CASCADE; $$); NOTICE: drop cascades to materialized view public.test_mv NOTICE: drop cascades to table public.test_tbl membership in replication set default replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/multiple_upstreams.out000066400000000000000000000124311415142317000224530ustar00rootroot00000000000000SELECT * FROM pglogical_regress_variables() \gset \c :subscriber_dsn GRANT ALL ON SCHEMA public TO nonsuper; SELECT E'\'' || current_database() || E'\'' AS subdb; subdb ------------ 'postgres' (1 row) \gset \c :provider1_dsn SET client_min_messages = 'warning'; GRANT ALL ON SCHEMA public TO nonsuper; SET client_min_messages = 'warning'; DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') = 904 THEN CREATE 
EXTENSION IF NOT EXISTS pglogical_origin; END IF; END;$$; CREATE EXTENSION IF NOT EXISTS pglogical; SELECT * FROM pglogical.create_node(node_name := 'test_provider1', dsn := (SELECT provider1_dsn FROM pglogical_regress_variables()) || ' user=super'); create_node ------------- 866557357 (1 row) \c :provider_dsn -- add these entries to provider SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.multi_ups_tbl(id integer primary key, key text unique not null, data text); $$); replicate_ddl_command ----------------------- t (1 row) INSERT INTO multi_ups_tbl VALUES(1, 'key1', 'data1'); INSERT INTO multi_ups_tbl VALUES(2, 'key2', 'data2'); INSERT INTO multi_ups_tbl VALUES(3, 'key3', 'data3'); SELECT * FROM pglogical.replication_set_add_table('default', 'multi_ups_tbl', true); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :provider1_dsn -- add these entries to provider1 CREATE TABLE multi_ups_tbl(id integer primary key, key text unique not null, data text); INSERT INTO multi_ups_tbl VALUES(4, 'key4', 'data4'); INSERT INTO multi_ups_tbl VALUES(5, 'key5', 'data5'); INSERT INTO multi_ups_tbl VALUES(6, 'key6', 'data6'); SELECT * FROM pglogical.replication_set_add_table('default', 'multi_ups_tbl'); replication_set_add_table --------------------------- t (1 row) \c :subscriber_dsn -- We'll use the already existing pglogical node -- notice synchronize_structure as false when table definition already exists SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription1', provider_dsn := (SELECT provider1_dsn FROM pglogical_regress_variables()) || ' user=super', synchronize_structure := false, forward_origins := '{}'); create_subscription --------------------- 3102546391 (1 row) BEGIN; SET LOCAL statement_timeout = '10s'; SELECT pglogical.wait_for_subscription_sync_complete('test_subscription1'); 
wait_for_subscription_sync_complete ------------------------------------- (1 row) COMMIT; SELECT subscription_name, status, provider_node, replication_sets, forward_origins FROM pglogical.show_subscription_status(); subscription_name | status | provider_node | replication_sets | forward_origins --------------------+-------------+----------------+---------------------------------------+----------------- test_subscription | replicating | test_provider | {default,default_insert_only,ddl_sql} | test_subscription1 | replicating | test_provider1 | {default,default_insert_only,ddl_sql} | (2 rows) SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status IN ('y', 'r') FROM pglogical.local_sync_status ORDER BY 2,3,4; sync_kind | sync_subid | sync_nspname | sync_relname | ?column? -----------+------------+--------------+---------------+---------- f | 3102546391 | public | multi_ups_tbl | t d | 3102546391 | | | t d | 3848008564 | public | multi_ups_tbl | t f | 3848008564 | | | t (4 rows) SELECT * from multi_ups_tbl ORDER BY id; id | key | data ----+------+------- 1 | key1 | data1 2 | key2 | data2 3 | key3 | data3 4 | key4 | data4 5 | key5 | data5 6 | key6 | data6 (6 rows) -- Make sure we see the slot and active connection \c :provider1_dsn SELECT plugin, slot_type, active FROM pg_replication_slots; plugin | slot_type | active ------------------+-----------+-------- pglogical_output | logical | t pglogical_output | logical | t (2 rows) SELECT count(*) FROM pg_stat_replication; count ------- 2 (1 row) -- cleanup \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.multi_ups_tbl CASCADE; $$); NOTICE: drop cascades to table public.multi_ups_tbl membership in replication set default replicate_ddl_command ----------------------- t (1 row) \c :provider1_dsn SELECT * FROM pglogical.drop_node(node_name := 'test_provider1'); ERROR: cannot drop node "test_provider1" because one or more replication slots for the node are still 
active \set VERBOSITY terse DROP TABLE public.multi_ups_tbl CASCADE; NOTICE: drop cascades to table multi_ups_tbl membership in replication set default \c :subscriber_dsn SELECT * FROM pglogical.drop_subscription('test_subscription1'); drop_subscription ------------------- 1 (1 row) \c :provider1_dsn SELECT * FROM pglogical.drop_node(node_name := 'test_provider1'); drop_node ----------- t (1 row) SELECT plugin, slot_type, active FROM pg_replication_slots; plugin | slot_type | active ------------------+-----------+-------- pglogical_output | logical | t (1 row) SELECT count(*) FROM pg_stat_replication; count ------- 1 (1 row) pglogical-REL2_4_1/expected/multiple_upstreams_1.out000066400000000000000000000124051415142317000226740ustar00rootroot00000000000000SELECT * FROM pglogical_regress_variables() \gset \c :provider1_dsn SET client_min_messages = 'warning'; GRANT ALL ON SCHEMA public TO nonsuper; CREATE OR REPLACE FUNCTION public.pg_xlog_wait_remote_apply(i_pos pg_lsn, i_pid integer) RETURNS VOID AS $FUNC$ BEGIN WHILE EXISTS(SELECT true FROM pg_stat_get_wal_senders() s WHERE s.replay_location < i_pos AND (i_pid = 0 OR s.pid = i_pid)) LOOP PERFORM pg_sleep(0.01); END LOOP; END;$FUNC$ LANGUAGE plpgsql; SET client_min_messages = 'warning'; DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') = 904 THEN CREATE EXTENSION IF NOT EXISTS pglogical_origin; END IF; END;$$; CREATE EXTENSION IF NOT EXISTS pglogical VERSION '2.0.0'; ALTER EXTENSION pglogical UPDATE; SELECT * FROM pglogical.create_node(node_name := 'test_provider1', dsn := (SELECT provider1_dsn FROM pglogical_regress_variables()) || ' user=super'); create_node ------------- 866557357 (1 row) \c :provider_dsn -- add these entries to provider SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.multi_ups_tbl(id integer primary key, key text unique not null, data text); $$); replicate_ddl_command ----------------------- t (1 row) INSERT INTO multi_ups_tbl 
VALUES(1, 'key1', 'data1'); INSERT INTO multi_ups_tbl VALUES(2, 'key2', 'data2'); INSERT INTO multi_ups_tbl VALUES(3, 'key3', 'data3'); SELECT * FROM pglogical.replication_set_add_table('default', 'multi_ups_tbl', true); replication_set_add_table ----------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :provider1_dsn -- add these entries to provider1 CREATE TABLE multi_ups_tbl(id integer primary key, key text unique not null, data text); INSERT INTO multi_ups_tbl VALUES(4, 'key4', 'data4'); INSERT INTO multi_ups_tbl VALUES(5, 'key5', 'data5'); INSERT INTO multi_ups_tbl VALUES(6, 'key6', 'data6'); SELECT * FROM pglogical.replication_set_add_table('default', 'multi_ups_tbl'); replication_set_add_table ----------------------- t (1 row) \c :subscriber_dsn -- We'll use the already existing pglogical node -- notice synchronize_structure as false when table definition already exists BEGIN; SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription1', provider_dsn := (SELECT provider1_dsn FROM pglogical_regress_variables()) || ' user=super', synchronize_structure := false, forward_origins := '{}'); create_subscription --------------------- 3102546391 (1 row) /* * Remove the function we added in preseed because otherwise the restore of * schema will fail. We do this in same transaction as create_subscription() * because the subscription process will only start on commit. 
*/ DROP FUNCTION IF EXISTS public.pglogical_regress_variables(); COMMIT; DO $$ BEGIN FOR i IN 1..100 LOOP IF EXISTS (SELECT 1 FROM pglogical.show_subscription_status() WHERE status = 'replicating' and subscription_name = 'test_subscription1') THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; SELECT subscription_name, status, provider_node, replication_sets, forward_origins FROM pglogical.show_subscription_status(); subscription_name | status | provider_node | replication_sets | forward_origins --------------------+-------------+----------------+-----------------------------------------+----------------- test_subscription | replicating | test_provider | {default_insert_only,ddl_sql,repset_test,default} | test_subscription1 | replicating | test_provider1 | {default,default_insert_only,ddl_sql} | (2 rows) DO $$ BEGIN FOR i IN 1..300 LOOP IF EXISTS (SELECT 1 FROM pglogical.local_sync_status WHERE sync_status = 'r') THEN EXIT; END IF; PERFORM pg_sleep(0.1); END LOOP; END;$$; SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status FROM pglogical.local_sync_status ORDER BY 2,3,4; sync_kind | sync_subid | sync_nspname | sync_relname | sync_status -----------+------------+--------------+---------------+------------- f | 3102546391 | public | multi_ups_tbl | r d | 3102546391 | | | r d | 3848008564 | public | multi_ups_tbl | r f | 3848008564 | | | r (4 rows) SELECT * from multi_ups_tbl ORDER BY id; id | key | data ----+------+------- 1 | key1 | data1 2 | key2 | data2 3 | key3 | data3 4 | key4 | data4 5 | key5 | data5 6 | key6 | data6 (6 rows) -- Make sure we see the slot and active connection \c :provider1_dsn SELECT plugin, slot_type, active FROM pg_replication_slots; plugin | slot_type | active ------------------+-----------+-------- pglogical_output | logical | t pglogical_output | logical | t (2 rows) SELECT count(*) FROM pg_stat_replication; count ------- 2 (1 row) -- cleanup \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ DROP TABLE 
public.multi_ups_tbl CASCADE; $$); NOTICE: drop cascades to table public.multi_ups_tbl membership in replication set default CONTEXT: during execution of queued SQL statement: DROP TABLE public.multi_ups_tbl CASCADE; replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/node_origin_cascade.out000066400000000000000000000141511415142317000224550ustar00rootroot00000000000000SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT E'\'' || current_database() || E'\'' AS pubdb; pubdb -------------- 'regression' (1 row) \gset \c :orig_provider_dsn SET client_min_messages = 'warning'; GRANT ALL ON SCHEMA public TO nonsuper; SET client_min_messages = 'warning'; DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') = 904 THEN CREATE EXTENSION IF NOT EXISTS pglogical_origin; END IF; END;$$; DO $$ BEGIN IF version() ~ 'Postgres-XL' THEN CREATE EXTENSION IF NOT EXISTS pglogical; ELSE CREATE EXTENSION IF NOT EXISTS pglogical VERSION '1.0.0'; END IF; END; $$; ALTER EXTENSION pglogical UPDATE; SELECT * FROM pglogical.create_node(node_name := 'test_orig_provider', dsn := (SELECT orig_provider_dsn FROM pglogical_regress_variables()) || ' user=super'); create_node ------------- 4029216451 (1 row) \c :provider_dsn SET client_min_messages = 'warning'; -- test_provider pglogical node already exists here. 
BEGIN; SELECT * FROM pglogical.create_subscription( subscription_name := 'test_orig_subscription', provider_dsn := (SELECT orig_provider_dsn FROM pglogical_regress_variables()) || ' user=super', synchronize_structure := false, forward_origins := '{}'); create_subscription --------------------- 3575176667 (1 row) COMMIT; BEGIN; SET LOCAL statement_timeout = '10s'; SELECT pglogical.wait_for_subscription_sync_complete('test_orig_subscription'); wait_for_subscription_sync_complete ------------------------------------- (1 row) COMMIT; SELECT subscription_name, status, provider_node, replication_sets, forward_origins FROM pglogical.show_subscription_status(); subscription_name | status | provider_node | replication_sets | forward_origins ------------------------+-------------+--------------------+---------------------------------------+----------------- test_orig_subscription | replicating | test_orig_provider | {default,default_insert_only,ddl_sql} | (1 row) SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status IN ('y', 'r') FROM pglogical.local_sync_status ORDER BY 2,3,4; sync_kind | sync_subid | sync_nspname | sync_relname | ?column? -----------+------------+--------------+--------------+---------- d | 3575176667 | | | t (1 row) -- Make sure we see the slot and active connection \c :orig_provider_dsn SELECT plugin, slot_type, active FROM pg_replication_slots; plugin | slot_type | active ------------------+-----------+-------- pglogical_output | logical | t pglogical_output | logical | t (2 rows) SELECT count(*) FROM pg_stat_replication; count ------- 2 (1 row) -- Table that replicates from top level provider to mid-level pglogical node. 
\c :orig_provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.top_level_tbl ( id serial primary key, other integer, data text, something interval ); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'top_level_tbl'); replication_set_add_table --------------------------- t (1 row) INSERT INTO top_level_tbl(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :provider_dsn SELECT id, other, data, something FROM top_level_tbl ORDER BY id; id | other | data | something ----+-------+------+------------------ 1 | 5 | foo | @ 1 min 2 | 4 | bar | @ 84 days 3 | 3 | baz | @ 2 years 1 hour 4 | 2 | qux | @ 8 mons 2 days 5 | 1 | | (5 rows) -- Table that replicates from top level provider to mid-level pglogical node. 
SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.mid_level_tbl ( id serial primary key, other integer, data text, something interval ); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'mid_level_tbl'); replication_set_add_table --------------------------- t (1 row) INSERT INTO mid_level_tbl(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, something FROM mid_level_tbl ORDER BY id; id | other | data | something ----+-------+------+------------------ 1 | 5 | foo | @ 1 min 2 | 4 | bar | @ 84 days 3 | 3 | baz | @ 2 years 1 hour 4 | 2 | qux | @ 8 mons 2 days 5 | 1 | | (5 rows) -- drop the tables \c :orig_provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.top_level_tbl CASCADE; $$); NOTICE: drop cascades to table public.top_level_tbl membership in replication set default replicate_ddl_command ----------------------- t (1 row) \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.mid_level_tbl CASCADE; $$); NOTICE: drop cascades to table public.mid_level_tbl membership in replication set default replicate_ddl_command ----------------------- t (1 row) \c :provider_dsn SELECT * FROM pglogical.drop_subscription('test_orig_subscription'); drop_subscription ------------------- 1 (1 row) \c :orig_provider_dsn SELECT * FROM pglogical.drop_node(node_name := 'test_orig_provider'); drop_node ----------- t (1 row) SELECT plugin, slot_type, active FROM pg_replication_slots; plugin | slot_type | active ------------------+-----------+-------- pglogical_output | logical | t (1 row) SELECT count(*) FROM pg_stat_replication; 
count ------- 1 (1 row) pglogical-REL2_4_1/expected/parallel.out000066400000000000000000000133431415142317000203140ustar00rootroot00000000000000SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT * FROM pglogical.create_replication_set('parallel'); create_replication_set ------------------------ 3731651575 (1 row) \c :subscriber_dsn SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription_parallel', provider_dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=super', replication_sets := '{parallel,default}', forward_origins := '{}', synchronize_structure := false, synchronize_data := false ); ERROR: existing subscription "test_subscription" to node "test_provider" already subscribes to replication set "default" SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription_parallel', provider_dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=super', replication_sets := '{parallel}', forward_origins := '{}', synchronize_structure := false, synchronize_data := false ); create_subscription --------------------- 4051189029 (1 row) BEGIN; SET LOCAL statement_timeout = '10s'; SELECT pglogical.wait_for_subscription_sync_complete('test_subscription_parallel'); wait_for_subscription_sync_complete ------------------------------------- (1 row) COMMIT; SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status IN ('y', 'r') FROM pglogical.local_sync_status ORDER BY 2,3,4; sync_kind | sync_subid | sync_nspname | sync_relname | ?column? 
-----------+------------+--------------+--------------+---------- f | 3848008564 | | | t i | 4051189029 | | | t (2 rows) SELECT * FROM pglogical.show_subscription_status(); subscription_name | status | provider_node | provider_dsn | slot_name | replication_sets | forward_origins ----------------------------+-------------+---------------+------------------------------+--------------------------------------------+---------------------------------------+----------------- test_subscription | replicating | test_provider | dbname=regression user=super | pgl_postgres_test_provider_test_sube55bf37 | {default,default_insert_only,ddl_sql} | test_subscription_parallel | replicating | test_provider | dbname=regression user=super | pgl_postgres_test_provider_test_subf1783d2 | {parallel} | (2 rows) -- Make sure we see the slot and active connection \c :provider_dsn SELECT plugin, slot_type, database, active FROM pg_replication_slots; plugin | slot_type | database | active ------------------+-----------+------------+-------- pglogical_output | logical | regression | t pglogical_output | logical | regression | t (2 rows) SELECT count(*) FROM pg_stat_replication; count ------- 2 (1 row) SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.basic_dml1 ( id serial primary key, other integer, data text, something interval ); CREATE TABLE public.basic_dml2 ( id serial primary key, other integer, data text, something interval ); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml1'); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('parallel', 'basic_dml2'); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) WITH one AS ( INSERT INTO basic_dml1(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', 
'12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL) RETURNING * ) INSERT INTO basic_dml2 SELECT * FROM one; BEGIN; UPDATE basic_dml1 SET other = id, something = something - '10 seconds'::interval WHERE id < 3; DELETE FROM basic_dml2 WHERE id < 3; COMMIT; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) SELECT * FROM basic_dml1; id | other | data | something ----+-------+------+-------------------- 3 | 3 | baz | @ 2 years 1 hour 4 | 2 | qux | @ 8 mons 2 days 5 | 1 | | 1 | 1 | foo | @ 50 secs 2 | 2 | bar | @ 84 days -10 secs (5 rows) SELECT * FROM basic_dml2; id | other | data | something ----+-------+------+------------------ 3 | 3 | baz | @ 2 years 1 hour 4 | 2 | qux | @ 8 mons 2 days 5 | 1 | | (3 rows) \c :subscriber_dsn SELECT * FROM basic_dml1; id | other | data | something ----+-------+------+-------------------- 3 | 3 | baz | @ 2 years 1 hour 4 | 2 | qux | @ 8 mons 2 days 5 | 1 | | 1 | 1 | foo | @ 50 secs 2 | 2 | bar | @ 84 days -10 secs (5 rows) SELECT * FROM basic_dml2; id | other | data | something ----+-------+------+------------------ 3 | 3 | baz | @ 2 years 1 hour 4 | 2 | qux | @ 8 mons 2 days 5 | 1 | | (3 rows) SELECT pglogical.drop_subscription('test_subscription_parallel'); drop_subscription ------------------- 1 (1 row) \c :provider_dsn \set VERBOSITY terse SELECT * FROM pglogical.drop_replication_set('parallel'); drop_replication_set ---------------------- t (1 row) SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.basic_dml1 CASCADE; DROP TABLE public.basic_dml2 CASCADE; $$); NOTICE: drop cascades to table public.basic_dml1 membership in replication set default replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/preseed.out000066400000000000000000000072761415142317000201570ustar00rootroot00000000000000-- Indirection for connection strings CREATE OR REPLACE FUNCTION 
public.pglogical_regress_variables( OUT orig_provider_dsn text, OUT provider_dsn text, OUT provider1_dsn text, OUT subscriber_dsn text ) RETURNS record LANGUAGE SQL AS $f$ SELECT current_setting('pglogical.orig_provider_dsn'), current_setting('pglogical.provider_dsn'), current_setting('pglogical.provider1_dsn'), current_setting('pglogical.subscriber_dsn') $f$; SELECT * FROM pglogical_regress_variables() \gset /* * Tests to ensure that objects/data that exists pre-clone is successfully * cloned. The results are checked, after the clone, in preseed_check.sql. */ \c :provider_dsn CREATE SEQUENCE some_local_seq; CREATE TABLE some_local_tbl(id serial primary key, key text unique not null, data text); INSERT INTO some_local_tbl(key, data) VALUES('key1', 'data1'); INSERT INTO some_local_tbl(key, data) VALUES('key2', NULL); INSERT INTO some_local_tbl(key, data) VALUES('key3', 'data3'); CREATE TABLE some_local_tbl1(id serial, key text unique not null, data text); INSERT INTO some_local_tbl1(key, data) VALUES('key1', 'data1'); INSERT INTO some_local_tbl1(key, data) VALUES('key2', NULL); INSERT INTO some_local_tbl1(key, data) VALUES('key3', 'data3'); CREATE TABLE some_local_tbl2(id serial, key text, data text); INSERT INTO some_local_tbl2(key, data) VALUES('key1', 'data1'); INSERT INTO some_local_tbl2(key, data) VALUES('key2', NULL); INSERT INTO some_local_tbl2(key, data) VALUES('key3', 'data3'); CREATE TABLE some_local_tbl3(id integer, key text, data text); INSERT INTO some_local_tbl3(key, data) VALUES('key1', 'data1'); INSERT INTO some_local_tbl3(key, data) VALUES('key2', NULL); INSERT INTO some_local_tbl3(key, data) VALUES('key3', 'data3'); /* * Make sure that the pglogical_regress_variables function exists both on * provider and subscriber since the original connection might have been * to completely different database. 
*/ CREATE OR REPLACE FUNCTION public.pglogical_regress_variables( OUT orig_provider_dsn text, OUT provider_dsn text, OUT provider1_dsn text, OUT subscriber_dsn text ) RETURNS record LANGUAGE SQL AS $f$ SELECT current_setting('pglogical.orig_provider_dsn'), current_setting('pglogical.provider_dsn'), current_setting('pglogical.provider1_dsn'), current_setting('pglogical.subscriber_dsn') $f$; CREATE DATABASE regression1; CREATE DATABASE sourcedb; \c :orig_provider_dsn CREATE OR REPLACE FUNCTION public.pglogical_regress_variables( OUT orig_provider_dsn text, OUT provider_dsn text, OUT provider1_dsn text, OUT subscriber_dsn text ) RETURNS record LANGUAGE SQL AS $f$ SELECT current_setting('pglogical.orig_provider_dsn'), current_setting('pglogical.provider_dsn'), current_setting('pglogical.provider1_dsn'), current_setting('pglogical.subscriber_dsn') $f$; \c :provider1_dsn CREATE OR REPLACE FUNCTION public.pglogical_regress_variables( OUT orig_provider_dsn text, OUT provider_dsn text, OUT provider1_dsn text, OUT subscriber_dsn text ) RETURNS record LANGUAGE SQL AS $f$ SELECT current_setting('pglogical.orig_provider_dsn'), current_setting('pglogical.provider_dsn'), current_setting('pglogical.provider1_dsn'), current_setting('pglogical.subscriber_dsn') $f$; \c :subscriber_dsn CREATE OR REPLACE FUNCTION public.pglogical_regress_variables( OUT orig_provider_dsn text, OUT provider_dsn text, OUT provider1_dsn text, OUT subscriber_dsn text ) RETURNS record LANGUAGE SQL AS $f$ SELECT current_setting('pglogical.orig_provider_dsn'), current_setting('pglogical.provider_dsn'), current_setting('pglogical.provider1_dsn'), current_setting('pglogical.subscriber_dsn') $f$; pglogical-REL2_4_1/expected/preseed_check.out000066400000000000000000000073321415142317000213050ustar00rootroot00000000000000-- Verify data from preseed.sql has correctly been cloned SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT attname, attnotnull, attisdropped from pg_attribute where 
attrelid = 'some_local_tbl'::regclass and attnum > 0 order by attnum; attname | attnotnull | attisdropped ---------+------------+-------------- id | t | f key | t | f data | f | f (3 rows) SELECT * FROM some_local_tbl ORDER BY id; id | key | data ----+------+------- 1 | key1 | data1 2 | key2 | 3 | key3 | data3 (3 rows) SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'some_local_tbl1'::regclass and attnum > 0 order by attnum; attname | attnotnull | attisdropped ---------+------------+-------------- id | t | f key | t | f data | f | f (3 rows) SELECT * FROM some_local_tbl1 ORDER BY id; id | key | data ----+------+------- 1 | key1 | data1 2 | key2 | 3 | key3 | data3 (3 rows) SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'some_local_tbl2'::regclass and attnum > 0 order by attnum; attname | attnotnull | attisdropped ---------+------------+-------------- id | t | f key | f | f data | f | f (3 rows) SELECT * FROM some_local_tbl2 ORDER BY id; id | key | data ----+------+------- 1 | key1 | data1 2 | key2 | 3 | key3 | data3 (3 rows) SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'some_local_tbl3'::regclass and attnum > 0 order by attnum; attname | attnotnull | attisdropped ---------+------------+-------------- id | f | f key | f | f data | f | f (3 rows) SELECT * FROM some_local_tbl3 ORDER BY id; id | key | data ----+------+------- | key1 | data1 | key2 | | key3 | data3 (3 rows) \c :subscriber_dsn SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'some_local_tbl'::regclass and attnum > 0 order by attnum; attname | attnotnull | attisdropped ---------+------------+-------------- id | t | f key | t | f data | f | f (3 rows) SELECT * FROM some_local_tbl ORDER BY id; id | key | data ----+-----+------ (0 rows) SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'some_local_tbl1'::regclass and attnum > 0 order by attnum; attname | attnotnull | 
attisdropped ---------+------------+-------------- id | t | f key | t | f data | f | f (3 rows) SELECT * FROM some_local_tbl1 ORDER BY id; id | key | data ----+-----+------ (0 rows) SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'some_local_tbl2'::regclass and attnum > 0 order by attnum; attname | attnotnull | attisdropped ---------+------------+-------------- id | t | f key | f | f data | f | f (3 rows) SELECT * FROM some_local_tbl2 ORDER BY id; id | key | data ----+-----+------ (0 rows) SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'some_local_tbl3'::regclass and attnum > 0 order by attnum; attname | attnotnull | attisdropped ---------+------------+-------------- id | f | f key | f | f data | f | f (3 rows) SELECT * FROM some_local_tbl3 ORDER BY id; id | key | data ----+-----+------ (0 rows) \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP SEQUENCE public.some_local_seq; DROP TABLE public.some_local_tbl; DROP TABLE public.some_local_tbl1; DROP TABLE public.some_local_tbl2; DROP TABLE public.some_local_tbl3; $$); replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/primary_key.out000066400000000000000000000511371415142317000210560ustar00rootroot00000000000000--PRIMARY KEY SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn -- testing update of primary key -- create table with primary key and 3 other tables referencing it SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.pk_users ( id integer PRIMARY KEY, another_id integer unique not null, a_id integer, name text, address text ); --pass $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'pk_users'); replication_set_add_table --------------------------- t (1 row) INSERT INTO pk_users VALUES(1,11,1,'User1', 'Address1'); INSERT INTO pk_users VALUES(2,12,1,'User2', 'Address2'); INSERT INTO pk_users 
VALUES(3,13,2,'User3', 'Address3'); INSERT INTO pk_users VALUES(4,14,2,'User4', 'Address4'); SELECT * FROM pk_users ORDER BY id; id | another_id | a_id | name | address ----+------------+------+-------+---------- 1 | 11 | 1 | User1 | Address1 2 | 12 | 1 | User2 | Address2 3 | 13 | 2 | User3 | Address3 4 | 14 | 2 | User4 | Address4 (4 rows) SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'pk_users'::regclass and attnum > 0 order by attnum; attname | attnotnull | attisdropped ------------+------------+-------------- id | t | f another_id | t | f a_id | f | f name | f | f address | f | f (5 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM pk_users ORDER BY id; id | another_id | a_id | name | address ----+------------+------+-------+---------- 1 | 11 | 1 | User1 | Address1 2 | 12 | 1 | User2 | Address2 3 | 13 | 2 | User3 | Address3 4 | 14 | 2 | User4 | Address4 (4 rows) \c :provider_dsn UPDATE pk_users SET address='UpdatedAddress1' WHERE id=1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM pk_users ORDER BY id; id | another_id | a_id | name | address ----+------------+------+-------+----------------- 1 | 11 | 1 | User1 | UpdatedAddress1 2 | 12 | 1 | User2 | Address2 3 | 13 | 2 | User3 | Address3 4 | 14 | 2 | User4 | Address4 (4 rows) -- Set up for secondary unique index and two-index -- conflict handling cases. 
INSERT INTO pk_users VALUES (5000,5000,0,'sub1',NULL); INSERT INTO pk_users VALUES (6000,6000,0,'sub2',NULL); \c :provider_dsn -- Resolve a conflict on the secondary unique constraint INSERT INTO pk_users VALUES (5001,5000,1,'pub1',NULL); -- And a conflict that violates two constraints INSERT INTO pk_users VALUES (6000,6000,1,'pub2',NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM pk_users WHERE id IN (5000,5001,6000) ORDER BY id; id | another_id | a_id | name | address ------+------------+------+------+--------- 5001 | 5000 | 1 | pub1 | 6000 | 6000 | 1 | pub2 | (2 rows) \c :provider_dsn DELETE FROM pk_users WHERE id IN (5000,5001,6000); \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ CREATE UNIQUE INDEX another_id_temp_idx ON public.pk_users (another_id); ALTER TABLE public.pk_users DROP CONSTRAINT pk_users_pkey, ADD CONSTRAINT pk_users_pkey PRIMARY KEY USING INDEX another_id_temp_idx; ALTER TABLE public.pk_users DROP CONSTRAINT pk_users_another_id_key; $$); NOTICE: ALTER TABLE / ADD CONSTRAINT USING INDEX will rename index "another_id_temp_idx" to "pk_users_pkey" replicate_ddl_command ----------------------- t (1 row) SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'pk_users'::regclass and attnum > 0 order by attnum; attname | attnotnull | attisdropped ------------+------------+-------------- id | t | f another_id | t | f a_id | f | f name | f | f address | f | f (5 rows) UPDATE pk_users SET address='UpdatedAddress2' WHERE id=2; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'pk_users'::regclass and attnum > 0 order by attnum; attname | attnotnull | attisdropped ------------+------------+-------------- id | t | f another_id | t | f a_id | f | f name | f | f address | f | f 
(5 rows) SELECT * FROM pk_users ORDER BY id; id | another_id | a_id | name | address ----+------------+------+-------+----------------- 1 | 11 | 1 | User1 | UpdatedAddress1 2 | 12 | 1 | User2 | UpdatedAddress2 3 | 13 | 2 | User3 | Address3 4 | 14 | 2 | User4 | Address4 (4 rows) \c :provider_dsn UPDATE pk_users SET address='UpdatedAddress3' WHERE another_id=12; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM pk_users ORDER BY id; id | another_id | a_id | name | address ----+------------+------+-------+----------------- 1 | 11 | 1 | User1 | UpdatedAddress1 2 | 12 | 1 | User2 | UpdatedAddress3 3 | 13 | 2 | User3 | Address3 4 | 14 | 2 | User4 | Address4 (4 rows) \c :provider_dsn UPDATE pk_users SET address='UpdatedAddress4' WHERE a_id=2; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn INSERT INTO pk_users VALUES(4,15,2,'User5', 'Address5'); -- subscriber now has duplicated value in id field while provider does not SELECT * FROM pk_users ORDER BY id; id | another_id | a_id | name | address ----+------------+------+-------+----------------- 1 | 11 | 1 | User1 | UpdatedAddress1 2 | 12 | 1 | User2 | UpdatedAddress3 3 | 13 | 2 | User3 | UpdatedAddress4 4 | 14 | 2 | User4 | UpdatedAddress4 4 | 15 | 2 | User5 | Address5 (5 rows) \c :provider_dsn \set VERBOSITY terse SELECT quote_literal(pg_current_xlog_location()) as curr_lsn \gset SELECT pglogical.replicate_ddl_command($$ CREATE UNIQUE INDEX id_temp_idx ON public.pk_users (id); ALTER TABLE public.pk_users DROP CONSTRAINT pk_users_pkey, ADD CONSTRAINT pk_users_pkey PRIMARY KEY USING INDEX id_temp_idx; $$); NOTICE: ALTER TABLE / ADD CONSTRAINT USING INDEX will rename index "id_temp_idx" to "pk_users_pkey" replicate_ddl_command ----------------------- t (1 row) SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'pk_users'::regclass and 
attnum > 0 order by attnum; attname | attnotnull | attisdropped ------------+------------+-------------- id | t | f another_id | t | f a_id | f | f name | f | f address | f | f (5 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, :curr_lsn); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'pk_users'::regclass and attnum > 0 order by attnum; attname | attnotnull | attisdropped ------------+------------+-------------- id | t | f another_id | t | f a_id | f | f name | f | f address | f | f (5 rows) SELECT pglogical.alter_subscription_disable('test_subscription', true); alter_subscription_disable ---------------------------- t (1 row) \c :provider_dsn -- Wait for subscription to disconnect. It will have been bouncing already -- due to apply worker restarts, but if it was retrying it'll stay down -- this time. DO $$ BEGIN FOR i IN 1..100 LOOP IF (SELECT count(1) FROM pg_replication_slots WHERE active = false) THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; SELECT data::json->'action' as action, CASE WHEN data::json->>'action' IN ('I', 'D', 'U') THEN json_extract_path(data::json, 'relation') END as data FROM pg_logical_slot_get_changes((SELECT slot_name FROM pg_replication_slots), NULL, 1, 'min_proto_version', '1', 'max_proto_version', '1', 'startup_params_format', '1', 'proto_format', 'json', 'pglogical.replication_set_names', 'default,ddl_sql'); action | data --------+----------------------- "S" | "B" | "I" | ["pglogical","queue"] "C" | (4 rows) SELECT data::json->'action' as action, CASE WHEN data::json->>'action' IN ('I', 'D', 'U') THEN data END as data FROM pg_logical_slot_get_changes((SELECT slot_name FROM pg_replication_slots), NULL, 1, 'min_proto_version', '1', 'max_proto_version', '1', 'startup_params_format', '1', 'proto_format', 'json', 'pglogical.replication_set_names', 'default,ddl_sql'); action | data --------+------ (0 rows) \c 
:subscriber_dsn SELECT pglogical.alter_subscription_enable('test_subscription', true); alter_subscription_enable --------------------------- t (1 row) DELETE FROM pk_users WHERE id = 4;-- remove the offending entries. \c :provider_dsn DO $$ BEGIN FOR i IN 1..100 LOOP IF (SELECT count(1) FROM pg_replication_slots WHERE active = true) THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; UPDATE pk_users SET address='UpdatedAddress2' WHERE id=2; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM pk_users ORDER BY id; id | another_id | a_id | name | address ----+------------+------+-------+----------------- 1 | 11 | 1 | User1 | UpdatedAddress1 2 | 12 | 1 | User2 | UpdatedAddress2 3 | 13 | 2 | User3 | UpdatedAddress4 (3 rows) \c :provider_dsn -- -- Test to show that we don't defend against alterations to tables -- that will break replication once added to a repset, or prevent -- dml that would break on apply. -- -- See 2ndQuadrant/pglogical_internal#146 -- -- Show that the current PK is not marked 'indisreplident' because we use -- REPLICA IDENTITY DEFAULT SELECT indisreplident FROM pg_index WHERE indexrelid = 'pk_users_pkey'::regclass; indisreplident ---------------- f (1 row) SELECT relreplident FROM pg_class WHERE oid = 'pk_users'::regclass; relreplident -------------- d (1 row) SELECT pglogical.replicate_ddl_command($$ ALTER TABLE public.pk_users DROP CONSTRAINT pk_users_pkey; $$); replicate_ddl_command ----------------------- t (1 row) INSERT INTO pk_users VALUES(90,0,0,'User90', 'Address90'); -- pglogical will stop us adding the table to a repset if we try to, -- but didn't stop us altering it, and won't stop us updating it... 
BEGIN; SELECT * FROM pglogical.replication_set_remove_table('default', 'pk_users'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'pk_users'); ERROR: table pk_users cannot be added to replication set default ROLLBACK; -- Per 2ndQuadrant/pglogical_internal#146 this shouldn't be allowed, but -- currently is. Logical decoding will fail to capture this change and we -- won't progress with decoding. -- -- This will get recorded by logical decoding with no 'oldkey' values, -- causing pglogical to fail to apply it with an error like -- -- CONFLICT: remote UPDATE on relation public.pk_users (tuple not found). Resolution: skip. -- UPDATE pk_users SET id = 91 WHERE id = 90; -- Catchup will replay the insert and succeed, but the update -- will be lost. BEGIN; SET LOCAL statement_timeout = '2s'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); ERROR: canceling statement due to statement timeout ROLLBACK; -- To carry on we'll need to make the index on the downstream -- (which is odd, because logical decoding didn't capture the -- oldkey of the tuple, so how can we apply it?) \c :subscriber_dsn ALTER TABLE public.pk_users ADD CONSTRAINT pk_users_pkey PRIMARY KEY (id) NOT DEFERRABLE; \c :provider_dsn ALTER TABLE public.pk_users ADD CONSTRAINT pk_users_pkey PRIMARY KEY (id) NOT DEFERRABLE; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) -- Demonstrate that deferrable indexes aren't yet supported for updates on downstream -- and will fail with an informative error. 
SELECT pglogical.replicate_ddl_command($$ ALTER TABLE public.pk_users DROP CONSTRAINT pk_users_pkey, ADD CONSTRAINT pk_users_pkey PRIMARY KEY (id) DEFERRABLE INITIALLY DEFERRED; $$); replicate_ddl_command ----------------------- t (1 row) -- Not allowed, deferrable ALTER TABLE public.pk_users REPLICA IDENTITY USING INDEX pk_users_pkey; ERROR: cannot use non-immediate index "pk_users_pkey" as replica identity -- New index isn't REPLICA IDENTITY either SELECT indisreplident FROM pg_index WHERE indexrelid = 'pk_users_pkey'::regclass; indisreplident ---------------- f (1 row) -- pglogical won't let us add the table to a repset, though -- it doesn't stop us altering it; see 2ndQuadrant/pglogical_internal#146 BEGIN; SELECT * FROM pglogical.replication_set_remove_table('default', 'pk_users'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'pk_users'); ERROR: table pk_users cannot be added to replication set default ROLLBACK; -- We can still INSERT (which is fine) INSERT INTO pk_users VALUES(100,0,0,'User100', 'Address100'); -- FIXME pglogical shouldn't allow this, no valid replica identity exists -- see 2ndQuadrant/pglogical_internal#146 UPDATE pk_users SET id = 101 WHERE id = 100; -- Must time out, apply will fail on downstream due to no replident index BEGIN; SET LOCAL statement_timeout = '2s'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); ERROR: canceling statement due to statement timeout ROLLBACK; \c :subscriber_dsn -- entry 100 must be absent since we can't apply it without -- a suitable pk SELECT id FROM pk_users WHERE id IN (90, 91, 100, 101) ORDER BY id; id ----- 90 100 (2 rows) -- we can recover by re-creating the pk as non-deferrable ALTER TABLE public.pk_users DROP CONSTRAINT pk_users_pkey, ADD CONSTRAINT pk_users_pkey PRIMARY KEY (id) NOT DEFERRABLE; -- then replay. Toggle the subscription's enabled state -- to make it recover faster for a quicker test run. 
SELECT pglogical.alter_subscription_disable('test_subscription', true); alter_subscription_disable ---------------------------- t (1 row) SELECT pglogical.alter_subscription_enable('test_subscription', true); alter_subscription_enable --------------------------- t (1 row) \c :provider_dsn SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id FROM pk_users WHERE id IN (90, 91, 100, 101) ORDER BY id; id ----- 90 100 (2 rows) \c :provider_dsn -- Subscriber and provider have diverged due to inability to replicate -- the UPDATEs SELECT id FROM pk_users WHERE id IN (90, 91, 100, 101) ORDER BY id; id ----- 91 101 (2 rows) -- Demonstrate that we properly handle wide conflict rows \c :subscriber_dsn INSERT INTO pk_users (id, another_id, address) VALUES (200,2000,repeat('waah daah sooo mooo', 1000)); \c :provider_dsn INSERT INTO pk_users (id, another_id, address) VALUES (200,2000,repeat('boop boop doop boop', 1000)); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, another_id, left(address,30) AS address_abbrev FROM pk_users WHERE another_id = 2000; id | another_id | address_abbrev -----+------------+-------------------------------- 200 | 2000 | boop boop doop boopboop boop d (1 row) -- DELETE conflicts; the DELETE is discarded \c :subscriber_dsn DELETE FROM pk_users WHERE id = 1; \c :provider_dsn DELETE FROM pk_users WHERE id = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) -- UPDATE conflicts violating multiple constraints. -- For this one we need to put the secondary unique -- constraint back. 
TRUNCATE TABLE pk_users; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) SELECT pglogical.replicate_ddl_command($$ CREATE UNIQUE INDEX pk_users_another_id_idx ON public.pk_users(another_id); $$); replicate_ddl_command ----------------------- t (1 row) \c :subscriber_dsn INSERT INTO pk_users VALUES (1,10,0,'sub',NULL), (2,20,0,'sub',NULL); \c :provider_dsn INSERT INTO pk_users VALUES (3,11,1,'pub',NULL), (4,22,1,'pub',NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM pk_users ORDER BY id; id | another_id | a_id | name | address ----+------------+------+------+--------- 1 | 10 | 0 | sub | 2 | 20 | 0 | sub | 3 | 11 | 1 | pub | 4 | 22 | 1 | pub | (4 rows) \c :provider_dsn -- UPDATE one of our upstream tuples to violate both constraints on the -- downstream. The constraints are independent but there's only one existing -- downstream tuple that violates both constraints. We'll match it by replica -- identity, replace it, and satisfy the other constraint in the process. UPDATE pk_users SET id=1, another_id = 10, name='should_error' WHERE id = 3 AND another_id = 11; SELECT * FROM pk_users ORDER BY id; id | another_id | a_id | name | address ----+------------+------+--------------+--------- 1 | 10 | 1 | should_error | 4 | 22 | 1 | pub | (2 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) -- UPDATEs to missing rows could either resurrect the row or conclude it -- shouldn't exist and discard it. Currently pgl unconditionally discards, so -- this row's name is a misnomer. 
\c :subscriber_dsn DELETE FROM pk_users WHERE id = 4 AND another_id = 22; \c :provider_dsn UPDATE pk_users SET name = 'jesus' WHERE id = 4; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn -- No resurrection here SELECT * FROM pk_users ORDER BY id; id | another_id | a_id | name | address ----+------------+------+--------------+--------- 1 | 10 | 1 | should_error | 2 | 20 | 0 | sub | 3 | 11 | 1 | pub | (3 rows) -- But if the UPDATE would create a row that violates -- a secondary unique index (but doesn't match the replident) -- we'll ERROR on the secondary index. INSERT INTO pk_users VALUES (5,55,0,'sub',NULL); \c :provider_dsn INSERT INTO pk_users VALUES (6,66,0,'sub',NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) -- The new row (6,55) will conflict with (5,55) UPDATE pk_users SET another_id = 55, name = 'pub_should_error' WHERE id = 6; SELECT * FROM pk_users ORDER BY id; id | another_id | a_id | name | address ----+------------+------+------------------+--------- 1 | 10 | 1 | should_error | 4 | 22 | 1 | jesus | 6 | 55 | 0 | pub_should_error | (3 rows) -- We'll time out due to apply errors BEGIN; SET LOCAL statement_timeout = '2s'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); ERROR: canceling statement due to statement timeout ROLLBACK; -- This time we'll fix it by deleting the conflicting row \c :subscriber_dsn SELECT * FROM pk_users ORDER BY id; id | another_id | a_id | name | address ----+------------+------+--------------+--------- 1 | 10 | 1 | should_error | 2 | 20 | 0 | sub | 3 | 11 | 1 | pub | 5 | 55 | 0 | sub | 6 | 66 | 0 | sub | (5 rows) DELETE FROM pk_users WHERE id = 5; \c :provider_dsn SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM pk_users ORDER BY id; id | another_id | a_id | name | address 
----+------------+------+------------------+--------- 1 | 10 | 1 | should_error | 2 | 20 | 0 | sub | 3 | 11 | 1 | pub | 6 | 55 | 0 | pub_should_error | (4 rows) \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.pk_users CASCADE; $$); NOTICE: drop cascades to table public.pk_users membership in replication set default replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/replication_set.out000066400000000000000000000242011415142317000216770ustar00rootroot00000000000000/* First test whether a table's replication set can be properly manipulated */ SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE SCHEMA normalschema; CREATE SCHEMA "strange.schema-IS"; CREATE TABLE public.test_publicschema(id serial primary key, data text); CREATE TABLE normalschema.test_normalschema(id serial primary key); CREATE TABLE "strange.schema-IS".test_strangeschema(id serial primary key); CREATE TABLE public.test_nopkey(id int); CREATE UNLOGGED TABLE public.test_unlogged(id int primary key); $$); replicate_ddl_command ----------------------- t (1 row) SELECT nspname, relname, set_name FROM pglogical.tables WHERE relname IN ('test_publicschema', 'test_normalschema', 'test_strangeschema', 'test_nopkey') ORDER BY 1,2,3; nspname | relname | set_name -------------------+--------------------+---------- normalschema | test_normalschema | public | test_nopkey | public | test_publicschema | strange.schema-IS | test_strangeschema | (4 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) -- show initial replication sets SELECT nspname, relname, set_name FROM pglogical.tables WHERE relname IN ('test_publicschema', 'test_normalschema', 'test_strangeschema', 'test_nopkey') ORDER BY 1,2,3; nspname | relname | set_name -------------------+--------------------+---------- normalschema | test_normalschema | 
public | test_nopkey | public | test_publicschema | strange.schema-IS | test_strangeschema | (4 rows) -- not existing replication set SELECT * FROM pglogical.replication_set_add_table('nonexisting', 'test_publicschema'); ERROR: replication set nonexisting not found -- create some replication sets SELECT * FROM pglogical.create_replication_set('repset_replicate_all'); create_replication_set ------------------------ 1767380104 (1 row) SELECT * FROM pglogical.create_replication_set('repset_replicate_instrunc', replicate_update := false, replicate_delete := false); create_replication_set ------------------------ 348382733 (1 row) SELECT * FROM pglogical.create_replication_set('repset_replicate_insupd', replicate_delete := false, replicate_truncate := false); create_replication_set ------------------------ 128878480 (1 row) -- add tables SELECT * FROM pglogical.replication_set_add_table('repset_replicate_all', 'test_publicschema'); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('repset_replicate_instrunc', 'normalschema.test_normalschema'); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('repset_replicate_insupd', 'normalschema.test_normalschema'); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('repset_replicate_insupd', '"strange.schema-IS".test_strangeschema'); replication_set_add_table --------------------------- t (1 row) -- should fail SELECT * FROM pglogical.replication_set_add_table('repset_replicate_all', 'test_unlogged'); ERROR: UNLOGGED and TEMP tables cannot be replicated SELECT * FROM pglogical.replication_set_add_table('repset_replicate_all', 'test_nopkey'); ERROR: table test_nopkey cannot be added to replication set repset_replicate_all DETAIL: table does not have PRIMARY KEY and given replication set is configured to replicate UPDATEs and/or DELETEs HINT: Add 
a PRIMARY KEY to the table -- success SELECT * FROM pglogical.replication_set_add_table('repset_replicate_instrunc', 'test_nopkey'); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.alter_replication_set('repset_replicate_insupd', replicate_truncate := true); alter_replication_set ----------------------- 128878480 (1 row) -- fail again SELECT * FROM pglogical.replication_set_add_table('repset_replicate_insupd', 'test_nopkey'); ERROR: table test_nopkey cannot be added to replication set repset_replicate_insupd DETAIL: table does not have PRIMARY KEY and given replication set is configured to replicate UPDATEs and/or DELETEs HINT: Add a PRIMARY KEY to the table SELECT * FROM pglogical.replication_set_add_all_tables('default', '{public}'); ERROR: table test_nopkey cannot be added to replication set default DETAIL: table does not have PRIMARY KEY and given replication set is configured to replicate UPDATEs and/or DELETEs HINT: Add a PRIMARY KEY to the table SELECT * FROM pglogical.alter_replication_set('repset_replicate_instrunc', replicate_update := true); ERROR: replication set repset_replicate_instrunc cannot be altered to replicate UPDATEs or DELETEs because it contains tables without PRIMARY KEY SELECT * FROM pglogical.alter_replication_set('repset_replicate_instrunc', replicate_delete := true); ERROR: replication set repset_replicate_instrunc cannot be altered to replicate UPDATEs or DELETEs because it contains tables without PRIMARY KEY -- Adding already-added fails \set VERBOSITY terse SELECT * FROM pglogical.replication_set_add_table('repset_replicate_all', 'public.test_publicschema'); ERROR: duplicate key value violates unique constraint "replication_set_table_pkey" \set VERBOSITY default -- check the replication sets SELECT nspname, relname, set_name FROM pglogical.tables WHERE relname IN ('test_publicschema', 'test_normalschema', 'test_strangeschema', 'test_nopkey') ORDER BY 1,2,3; nspname | relname | set_name 
-------------------+--------------------+--------------------------- normalschema | test_normalschema | repset_replicate_instrunc normalschema | test_normalschema | repset_replicate_insupd public | test_nopkey | repset_replicate_instrunc public | test_publicschema | repset_replicate_all strange.schema-IS | test_strangeschema | repset_replicate_insupd (5 rows) SELECT * FROM pglogical.replication_set_add_all_tables('default_insert_only', '{public}'); replication_set_add_all_tables -------------------------------- t (1 row) SELECT nspname, relname, set_name FROM pglogical.tables WHERE relname IN ('test_publicschema', 'test_normalschema', 'test_strangeschema', 'test_nopkey') ORDER BY 1,2,3; nspname | relname | set_name -------------------+--------------------+--------------------------- normalschema | test_normalschema | repset_replicate_instrunc normalschema | test_normalschema | repset_replicate_insupd public | test_nopkey | default_insert_only public | test_nopkey | repset_replicate_instrunc public | test_publicschema | default_insert_only public | test_publicschema | repset_replicate_all strange.schema-IS | test_strangeschema | repset_replicate_insupd (7 rows) --too short SELECT pglogical.create_replication_set(''); ERROR: replication set name cannot be empty -- Can't drop table while it's in a repset DROP TABLE public.test_publicschema; ERROR: cannot drop table test_publicschema because other objects depend on it DETAIL: table test_publicschema membership in replication set default_insert_only depends on table test_publicschema table test_publicschema membership in replication set repset_replicate_all depends on table test_publicschema HINT: Use DROP ... CASCADE to drop the dependent objects too. 
-- Can't drop table while it's in a repset BEGIN; SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.test_publicschema; $$); ERROR: cannot drop table public.test_publicschema because other objects depend on it DETAIL: table public.test_publicschema membership in replication set default_insert_only depends on table public.test_publicschema table public.test_publicschema membership in replication set repset_replicate_all depends on table public.test_publicschema HINT: Use DROP ... CASCADE to drop the dependent objects too. CONTEXT: during execution of queued SQL statement: DROP TABLE public.test_publicschema; ROLLBACK; -- Can CASCADE though, even outside ddlrep BEGIN; DROP TABLE public.test_publicschema CASCADE; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table test_publicschema membership in replication set default_insert_only drop cascades to table test_publicschema membership in replication set repset_replicate_all ROLLBACK; -- ... and can drop after repset removal SELECT pglogical.replication_set_remove_table('repset_replicate_all', 'public.test_publicschema'); replication_set_remove_table ------------------------------ t (1 row) SELECT pglogical.replication_set_remove_table('default_insert_only', 'public.test_publicschema'); replication_set_remove_table ------------------------------ t (1 row) BEGIN; DROP TABLE public.test_publicschema; ROLLBACK; \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.test_publicschema CASCADE; DROP SCHEMA normalschema CASCADE; DROP SCHEMA "strange.schema-IS" CASCADE; DROP TABLE public.test_nopkey CASCADE; DROP TABLE public.test_unlogged CASCADE; $$); NOTICE: drop cascades to table normalschema.test_normalschema NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to table "strange.schema-IS".test_strangeschema NOTICE: drop cascades to table "strange.schema-IS".test_strangeschema membership in replication set repset_replicate_insupd NOTICE: drop cascades to 2 
other objects replicate_ddl_command ----------------------- t (1 row) \c :subscriber_dsn SELECT * FROM pglogical.replication_set; set_id | set_nodeid | set_name | replicate_insert | replicate_update | replicate_delete | replicate_truncate ------------+------------+---------------------+------------------+------------------+------------------+-------------------- 828867312 | 1755434425 | default | t | t | t | t 3318003856 | 1755434425 | default_insert_only | t | f | f | t 2796587818 | 1755434425 | ddl_sql | t | f | f | f (3 rows) pglogical-REL2_4_1/expected/row_filter.out000066400000000000000000000511631415142317000206760ustar00rootroot00000000000000-- row based filtering SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.basic_dml ( id serial primary key, other integer, data text, "SomeThing" interval, insert_xid bigint DEFAULT txid_current() ); $$); replicate_ddl_command ----------------------- t (1 row) -- used to check if initial copy does row filtering \COPY basic_dml(id, other, data, "SomeThing") FROM STDIN WITH CSV -- create some functions: CREATE FUNCTION funcn_add(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; create function funcn_nochange(text) returns text as 'select $1 limit 1' language sql stable; create or replace function funcn_get_curr_decade() returns integer as $$ (SELECT EXTRACT(DECADE FROM NOW()):: integer); $$ language sql volatile; -- we allow volatile functions, it's user's responsibility to not do writes SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := 'current_user = data'); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) -- fail -- subselect SELECT * FROM 
pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := '(SELECT count(*) FROM pg_class) > 1'); ERROR: cannot use subquery in check constraint -- fail -- SELECT SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := 'SELECT true'); ERROR: syntax error at or near "SELECT" CONTEXT: invalid row_filter expression "SELECT true" -- fail -- nonexisting column SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := 'foobar'); ERROR: column "foobar" does not exist -- fail -- not coercable to bool SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := 'data'); ERROR: argument of row_filter must be type boolean, not type text SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$id between 2 AND 4$rf$); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := NULL); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$id > funcn_add(1,2) $rf$); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$data = funcn_nochange('baz') $rf$); replication_set_add_table --------------------------- t (1 row) SELECT * FROM 
pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ other > funcn_get_curr_decade() $rf$); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) -- use this filter for rest of the test SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', true, row_filter := $rf$id > 1 AND data IS DISTINCT FROM 'baz' AND data IS DISTINCT FROM 'bbb'$rf$); replication_set_add_table --------------------------- t (1 row) SELECT nspname, relname, set_name FROM pglogical.tables WHERE relname = 'basic_dml'; nspname | relname | set_name ---------+-----------+---------- public | basic_dml | default (1 row) -- fail, the membership in repset depends on data column \set VERBOSITY terse ALTER TABLE basic_dml DROP COLUMN data; ERROR: cannot drop table basic_dml column data because other objects depend on it \set VERBOSITY default SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn -- wait for the initial data to copy BEGIN; SET LOCAL statement_timeout = '10s'; SELECT pglogical.wait_for_subscription_sync_complete('test_subscription'); wait_for_subscription_sync_complete ------------------------------------- (1 row) COMMIT; SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; id | other | data | SomeThing ------+-------+------+----------- 5000 | 1 | aaa | @ 1 hour 5002 | 3 | ccc | @ 3 mins 5003 | 4 | ddd | @ 4 days (3 rows) ALTER TABLE public.basic_dml ADD COLUMN subonly integer; ALTER TABLE public.basic_dml ADD COLUMN subonly_def integer DEFAULT 99; ALTER TABLE public.basic_dml ADD COLUMN subonly_def_ts timestamptz DEFAULT current_timestamp; \c :provider_dsn 
TRUNCATE basic_dml; -- check basic insert replication INSERT INTO basic_dml(other, data, "SomeThing") VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; id | other | data | SomeThing ----+-------+------+----------------- 2 | 4 | bar | @ 84 days 4 | 2 | qux | @ 8 mons 2 days 5 | 1 | | (3 rows) -- update one row \c :provider_dsn UPDATE basic_dml SET other = '4', data = NULL, "SomeThing" = '3 days'::interval WHERE id = 4; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; id | other | data | SomeThing ----+-------+------+----------- 2 | 4 | bar | @ 84 days 4 | 4 | | @ 3 days 5 | 1 | | (3 rows) -- update multiple rows \c :provider_dsn UPDATE basic_dml SET other = id, data = data || id::text; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; id | other | data | SomeThing ----+-------+------+----------- 2 | 2 | bar2 | @ 84 days 4 | 4 | | @ 3 days 5 | 5 | | (3 rows) \c :provider_dsn UPDATE basic_dml SET other = id, "SomeThing" = "SomeThing" - '10 seconds'::interval WHERE id < 3; UPDATE basic_dml SET other = id, "SomeThing" = "SomeThing" + '10 seconds'::interval WHERE id > 3; DELETE FROM basic_dml WHERE id = 3; INSERT INTO basic_dml VALUES (3, 99, 'bazbaz', '2 years 1 hour'::interval); INSERT INTO basic_dml VALUES (7, 100, 'bazbaz', '2 years 1 hour'::interval); UPDATE basic_dml SET data = 'baz' WHERE id in (3,7); -- This update would be filtered at subscriber SELECT id, other, data, "SomeThing" 
from basic_dml ORDER BY id; id | other | data | SomeThing ----+-------+------+-------------------- 1 | 1 | foo1 | @ 50 secs 2 | 2 | bar2 | @ 84 days -10 secs 3 | 99 | baz | @ 2 years 1 hour 4 | 4 | | @ 3 days 10 secs 5 | 5 | | 7 | 100 | baz | @ 2 years 1 hour (6 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing", subonly, subonly_def FROM basic_dml ORDER BY id; id | other | data | SomeThing | subonly | subonly_def ----+-------+--------+--------------------+---------+------------- 2 | 2 | bar2 | @ 84 days -10 secs | | 99 3 | 99 | bazbaz | @ 2 years 1 hour | | 99 4 | 4 | | @ 3 days 10 secs | | 99 5 | 5 | | | | 99 7 | 100 | bazbaz | @ 2 years 1 hour | | 99 (5 rows) \c :provider_dsn UPDATE basic_dml SET data = 'bar' WHERE id = 3; -- This update would again start to be received at subscriber DELETE FROM basic_dml WHERE data = 'baz'; -- Delete reaches the subscriber for a filtered row INSERT INTO basic_dml VALUES (6, 100, 'baz', '2 years 1 hour'::interval); -- insert would be filtered SELECT id, other, data, "SomeThing" from basic_dml ORDER BY id; id | other | data | SomeThing ----+-------+------+-------------------- 1 | 1 | foo1 | @ 50 secs 2 | 2 | bar2 | @ 84 days -10 secs 3 | 99 | bar | @ 2 years 1 hour 4 | 4 | | @ 3 days 10 secs 5 | 5 | | 6 | 100 | baz | @ 2 years 1 hour (6 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing", subonly, subonly_def FROM basic_dml ORDER BY id; id | other | data | SomeThing | subonly | subonly_def ----+-------+------+--------------------+---------+------------- 2 | 2 | bar2 | @ 84 days -10 secs | | 99 3 | 99 | bar | @ 2 years 1 hour | | 99 4 | 4 | | @ 3 days 10 secs | | 99 5 | 5 | | | | 99 (4 rows) \c :provider_dsn UPDATE basic_dml SET data = 'bar' WHERE id = 6; UPDATE basic_dml SET data = 'abcd' WHERE id = 6; 
-- These updates would continue to be missed on subscriber -- as it does not have the primary key SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; id | other | data | SomeThing ----+-------+------+-------------------- 2 | 2 | bar2 | @ 84 days -10 secs 3 | 99 | bar | @ 2 years 1 hour 4 | 4 | | @ 3 days 10 secs 5 | 5 | | (4 rows) -- transaction timestamp should be updated for each row (see #148) SELECT count(DISTINCT subonly_def_ts) = count(DISTINCT insert_xid) FROM basic_dml; ?column? ---------- t (1 row) -- delete multiple rows \c :provider_dsn DELETE FROM basic_dml WHERE id < 4; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; id | other | data | SomeThing ----+-------+------+------------------ 4 | 4 | | @ 3 days 10 secs 5 | 5 | | (2 rows) -- truncate \c :provider_dsn TRUNCATE basic_dml; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; id | other | data | SomeThing ----+-------+------+----------- (0 rows) -- copy \c :provider_dsn \COPY basic_dml(id, other, data, "SomeThing") FROM STDIN WITH CSV SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; id | other | data | SomeThing ------+-------+------+----------- 9000 | 1 | aaa | @ 1 hour 9002 | 3 | ccc | @ 3 mins 9003 | 4 | ddd | @ 4 days (3 rows) \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.test_jsonb ( json_type text primary key, test_json jsonb ); $$); replicate_ddl_command ----------------------- t (1 row) INSERT INTO test_jsonb 
VALUES ('scalar','"a scalar"'), ('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'), ('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}'); SELECT * FROM pglogical.replication_set_add_table('default', 'test_jsonb', true, row_filter := $rf$(test_json ->> 'field2') IS DISTINCT FROM 'val2' $rf$); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn DO $$ BEGIN FOR i IN 1..100 LOOP IF NOT EXISTS (SELECT 1 FROM pglogical.local_sync_status WHERE sync_status != 'r') THEN EXIT; END IF; PERFORM pg_sleep(0.1); END LOOP; END;$$; SELECT * FROM test_jsonb ORDER BY json_type; json_type | test_json -----------+-------------------------------------------------------------------- array | ["zero", "one", "two", null, "four", "five", [1, 2, 3], {"f1": 9}] scalar | "a scalar" (2 rows) \c :provider_dsn -- Filter may refer to not-replicated columns SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, columns := ARRAY['id', 'data'], row_filter := $rf$other = 2$rf$); replication_set_add_table --------------------------- t (1 row) INSERT INTO basic_dml(other, data, "SomeThing") VALUES (2, 'itstwo', '1 second'::interval); SELECT other, data, "SomeThing" FROM basic_dml WHERE data = 'itstwo'; other | data | SomeThing -------+--------+----------- 2 | itstwo | @ 1 sec (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn -- 'other' will be NULL as it wasn't in the repset -- even though we filtered on it. So will SomeThing. 
SELECT other, data, "SomeThing" FROM basic_dml WHERE data = 'itstwo'; other | data | SomeThing -------+--------+----------- | itstwo | (1 row) \c :provider_dsn --------------------------------------------------- -- Enhanced function tests covering basic plpgsql --------------------------------------------------- CREATE FUNCTION func_plpgsql_simple(arg integer) RETURNS integer LANGUAGE plpgsql AS $$ BEGIN RETURN arg; END; $$; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ func_plpgsql_simple(other) = 100 $rf$); replication_set_add_table --------------------------- t (1 row) -- Should FAIL due to dependency -- -- FIXME: Succeeds incorrectly (RM#5880) leading to -- cache lookup failed for function" errors in logs if allowed to commit -- BEGIN; DROP FUNCTION func_plpgsql_simple(integer); ROLLBACK; INSERT INTO basic_dml (other) VALUES (100), (101); SELECT other FROM basic_dml WHERE other IN (100,101); other ------- 100 101 (2 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT other FROM basic_dml WHERE other IN (100,101); other ------- 100 (1 row) \c :provider_dsn CREATE FUNCTION func_plpgsql_logic(arg integer) RETURNS integer LANGUAGE plpgsql AS $$ BEGIN IF arg = 200 THEN RETURN arg; ELSE RETURN 0; END IF; END; $$; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ func_plpgsql_logic(other) = other $rf$); replication_set_add_table --------------------------- t (1 row) INSERT INTO basic_dml (other) VALUES (200), (201); SELECT other FROM basic_dml WHERE other IN (200,201); other ------- 200 201 
(2 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT other FROM basic_dml WHERE other IN (200,201); other ------- 200 (1 row) \c :provider_dsn CREATE FUNCTION func_plpgsql_security_definer(arg integer) RETURNS integer LANGUAGE plpgsql SECURITY DEFINER AS $$ BEGIN RAISE NOTICE 'c_u: %, s_u: %', current_user, session_user; RETURN arg; END; $$; CREATE ROLE temp_owner; ALTER FUNCTION func_plpgsql_security_definer(integer) OWNER TO temp_owner; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ func_plpgsql_security_definer(other) = 300 $rf$); replication_set_add_table --------------------------- t (1 row) INSERT INTO basic_dml (other) VALUES (300), (301); SELECT other FROM basic_dml WHERE other IN (300,301); other ------- 300 301 (2 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT other FROM basic_dml WHERE other IN (300,301); other ------- 300 (1 row) \c :provider_dsn CREATE FUNCTION func_plpgsql_exception(arg integer) RETURNS integer LANGUAGE plpgsql AS $$ BEGIN BEGIN SELECT arg/0; EXCEPTION WHEN division_by_zero THEN RETURN arg; END; RAISE EXCEPTION 'should be unreachable'; END; $$; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ func_plpgsql_exception(other) = 400 $rf$); replication_set_add_table --------------------------- t (1 row) INSERT INTO basic_dml (other) VALUES (400), (401); SELECT other FROM basic_dml WHERE other IN (400,401); other ------- 400 401 (2 rows) SELECT 
pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT other FROM basic_dml WHERE other IN (400,401); other ------- 400 (1 row) \c :provider_dsn -- Should not be able to use a SETOF or TABLE func directly -- but we can do it via a wrapper: CREATE FUNCTION func_plpgsql_srf_retq(arg integer) RETURNS TABLE (result integer, dummy boolean) LANGUAGE plpgsql SECURITY DEFINER AS $$ BEGIN RETURN QUERY SELECT arg * x, true FROM generate_series(1,2) x; RETURN; END; $$; -- fails with SRF context error BEGIN; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ (func_plpgsql_srf_retq(other)).result = 500 $rf$); ERROR: set-returning functions are not allowed in check constraints ROLLBACK; CREATE FUNCTION func_plpgsql_call_set(arg integer) RETURNS boolean LANGUAGE plpgsql AS $$ BEGIN RETURN (SELECT true FROM func_plpgsql_srf_retq(arg) WHERE result = arg * 2); END; $$; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ func_plpgsql_call_set(other) $rf$); replication_set_add_table --------------------------- t (1 row) INSERT INTO basic_dml (other) VALUES (500), (501); SELECT other FROM basic_dml WHERE other IN (500,501); other ------- 500 501 (2 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT other FROM basic_dml WHERE other IN (500,501); other ------- 500 501 (2 rows) \c :provider_dsn DROP FUNCTION func_plpgsql_simple(integer); DROP FUNCTION func_plpgsql_logic(integer); DROP FUNCTION func_plpgsql_security_definer(integer); DROP FUNCTION 
func_plpgsql_exception(integer); DROP FUNCTION func_plpgsql_srf_retq(integer); DROP FUNCTION func_plpgsql_call_set(integer); DROP ROLE temp_owner; --------------------------------------------------- -- ^^^ End plpgsql tests --------------------------------------------------- \c :provider_dsn \set VERBOSITY terse DROP FUNCTION funcn_add(integer, integer); DROP FUNCTION funcn_nochange(text); DROP FUNCTION funcn_get_curr_decade(); SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.basic_dml CASCADE; DROP TABLE public.test_jsonb CASCADE; $$); NOTICE: drop cascades to table public.basic_dml membership in replication set default NOTICE: drop cascades to table public.test_jsonb membership in replication set default replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/row_filter_1.out000066400000000000000000000511661415142317000211210ustar00rootroot00000000000000-- row based filtering SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.basic_dml ( id serial primary key, other integer, data text, "SomeThing" interval, insert_xid bigint DEFAULT txid_current() ); $$); replicate_ddl_command ----------------------- t (1 row) -- used to check if initial copy does row filtering \COPY basic_dml(id, other, data, "SomeThing") FROM STDIN WITH CSV -- create some functions: CREATE FUNCTION funcn_add(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; create function funcn_nochange(text) returns text as 'select $1 limit 1' language sql stable; create or replace function funcn_get_curr_decade() returns integer as $$ (SELECT EXTRACT(DECADE FROM NOW()):: integer); $$ language sql volatile; -- we allow volatile functions, it's user's responsibility to not do writes SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := 'current_user = data'); replication_set_add_table 
--------------------------- t (1 row) SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) -- fail -- subselect SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := '(SELECT count(*) FROM pg_class) > 1'); ERROR: cannot use subquery in check constraint -- fail -- SELECT SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := 'SELECT true'); ERROR: syntax error at or near "SELECT" CONTEXT: invalid row_filter expression "SELECT true" -- fail -- nonexisting column SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := 'foobar'); ERROR: column "foobar" does not exist -- fail -- not coercable to bool SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := 'data'); ERROR: argument of row_filter must be type boolean, not type text SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$id between 2 AND 4$rf$); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := NULL); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$id > funcn_add(1,2) $rf$); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM 
pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$data = funcn_nochange('baz') $rf$); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ other > funcn_get_curr_decade() $rf$); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) -- use this filter for rest of the test SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', true, row_filter := $rf$id > 1 AND data IS DISTINCT FROM 'baz' AND data IS DISTINCT FROM 'bbb'$rf$); replication_set_add_table --------------------------- t (1 row) SELECT nspname, relname, set_name FROM pglogical.tables WHERE relname = 'basic_dml'; nspname | relname | set_name ---------+-----------+---------- public | basic_dml | default (1 row) -- fail, the membership in repset depends on data column \set VERBOSITY terse ALTER TABLE basic_dml DROP COLUMN data; ERROR: cannot drop column data of table basic_dml because other objects depend on it \set VERBOSITY default SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn -- wait for the initial data to copy BEGIN; SET LOCAL statement_timeout = '10s'; SELECT pglogical.wait_for_subscription_sync_complete('test_subscription'); wait_for_subscription_sync_complete ------------------------------------- (1 row) COMMIT; SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; id | other | data | SomeThing ------+-------+------+----------- 5000 | 1 | aaa | @ 1 hour 5002 | 3 | ccc | @ 3 mins 5003 | 4 | ddd | @ 4 days (3 rows) ALTER TABLE public.basic_dml ADD COLUMN 
subonly integer; ALTER TABLE public.basic_dml ADD COLUMN subonly_def integer DEFAULT 99; ALTER TABLE public.basic_dml ADD COLUMN subonly_def_ts timestamptz DEFAULT current_timestamp; \c :provider_dsn TRUNCATE basic_dml; -- check basic insert replication INSERT INTO basic_dml(other, data, "SomeThing") VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; id | other | data | SomeThing ----+-------+------+----------------- 2 | 4 | bar | @ 84 days 4 | 2 | qux | @ 8 mons 2 days 5 | 1 | | (3 rows) -- update one row \c :provider_dsn UPDATE basic_dml SET other = '4', data = NULL, "SomeThing" = '3 days'::interval WHERE id = 4; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; id | other | data | SomeThing ----+-------+------+----------- 2 | 4 | bar | @ 84 days 4 | 4 | | @ 3 days 5 | 1 | | (3 rows) -- update multiple rows \c :provider_dsn UPDATE basic_dml SET other = id, data = data || id::text; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; id | other | data | SomeThing ----+-------+------+----------- 2 | 2 | bar2 | @ 84 days 4 | 4 | | @ 3 days 5 | 5 | | (3 rows) \c :provider_dsn UPDATE basic_dml SET other = id, "SomeThing" = "SomeThing" - '10 seconds'::interval WHERE id < 3; UPDATE basic_dml SET other = id, "SomeThing" = "SomeThing" + '10 seconds'::interval WHERE id > 3; DELETE FROM basic_dml WHERE id = 3; INSERT INTO basic_dml VALUES (3, 99, 'bazbaz', '2 years 1 hour'::interval); INSERT INTO 
basic_dml VALUES (7, 100, 'bazbaz', '2 years 1 hour'::interval); UPDATE basic_dml SET data = 'baz' WHERE id in (3,7); -- This update would be filtered at subscriber SELECT id, other, data, "SomeThing" from basic_dml ORDER BY id; id | other | data | SomeThing ----+-------+------+-------------------- 1 | 1 | foo1 | @ 50 secs 2 | 2 | bar2 | @ 84 days -10 secs 3 | 99 | baz | @ 2 years 1 hour 4 | 4 | | @ 3 days 10 secs 5 | 5 | | 7 | 100 | baz | @ 2 years 1 hour (6 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing", subonly, subonly_def FROM basic_dml ORDER BY id; id | other | data | SomeThing | subonly | subonly_def ----+-------+--------+--------------------+---------+------------- 2 | 2 | bar2 | @ 84 days -10 secs | | 99 3 | 99 | bazbaz | @ 2 years 1 hour | | 99 4 | 4 | | @ 3 days 10 secs | | 99 5 | 5 | | | | 99 7 | 100 | bazbaz | @ 2 years 1 hour | | 99 (5 rows) \c :provider_dsn UPDATE basic_dml SET data = 'bar' WHERE id = 3; -- This update would again start to be received at subscriber DELETE FROM basic_dml WHERE data = 'baz'; -- Delete reaches the subscriber for a filtered row INSERT INTO basic_dml VALUES (6, 100, 'baz', '2 years 1 hour'::interval); -- insert would be filtered SELECT id, other, data, "SomeThing" from basic_dml ORDER BY id; id | other | data | SomeThing ----+-------+------+-------------------- 1 | 1 | foo1 | @ 50 secs 2 | 2 | bar2 | @ 84 days -10 secs 3 | 99 | bar | @ 2 years 1 hour 4 | 4 | | @ 3 days 10 secs 5 | 5 | | 6 | 100 | baz | @ 2 years 1 hour (6 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing", subonly, subonly_def FROM basic_dml ORDER BY id; id | other | data | SomeThing | subonly | subonly_def ----+-------+------+--------------------+---------+------------- 2 | 2 | bar2 | @ 84 days -10 secs | | 99 3 | 99 | bar 
| @ 2 years 1 hour | | 99 4 | 4 | | @ 3 days 10 secs | | 99 5 | 5 | | | | 99 (4 rows) \c :provider_dsn UPDATE basic_dml SET data = 'bar' WHERE id = 6; UPDATE basic_dml SET data = 'abcd' WHERE id = 6; -- These updates would continue to be missed on subscriber -- as it does not have the primary key SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; id | other | data | SomeThing ----+-------+------+-------------------- 2 | 2 | bar2 | @ 84 days -10 secs 3 | 99 | bar | @ 2 years 1 hour 4 | 4 | | @ 3 days 10 secs 5 | 5 | | (4 rows) -- transaction timestamp should be updated for each row (see #148) SELECT count(DISTINCT subonly_def_ts) = count(DISTINCT insert_xid) FROM basic_dml; ?column? ---------- t (1 row) -- delete multiple rows \c :provider_dsn DELETE FROM basic_dml WHERE id < 4; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; id | other | data | SomeThing ----+-------+------+------------------ 4 | 4 | | @ 3 days 10 secs 5 | 5 | | (2 rows) -- truncate \c :provider_dsn TRUNCATE basic_dml; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; id | other | data | SomeThing ----+-------+------+----------- (0 rows) -- copy \c :provider_dsn \COPY basic_dml(id, other, data, "SomeThing") FROM STDIN WITH CSV SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; id | other | data | SomeThing ------+-------+------+----------- 9000 | 1 | aaa | @ 1 hour 9002 | 3 | ccc | @ 3 mins 9003 | 4 | ddd | @ 4 days (3 rows) \c :provider_dsn SELECT 
pglogical.replicate_ddl_command($$ CREATE TABLE public.test_jsonb ( json_type text primary key, test_json jsonb ); $$); replicate_ddl_command ----------------------- t (1 row) INSERT INTO test_jsonb VALUES ('scalar','"a scalar"'), ('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'), ('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}'); SELECT * FROM pglogical.replication_set_add_table('default', 'test_jsonb', true, row_filter := $rf$(test_json ->> 'field2') IS DISTINCT FROM 'val2' $rf$); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn DO $$ BEGIN FOR i IN 1..100 LOOP IF NOT EXISTS (SELECT 1 FROM pglogical.local_sync_status WHERE sync_status != 'r') THEN EXIT; END IF; PERFORM pg_sleep(0.1); END LOOP; END;$$; SELECT * FROM test_jsonb ORDER BY json_type; json_type | test_json -----------+-------------------------------------------------------------------- array | ["zero", "one", "two", null, "four", "five", [1, 2, 3], {"f1": 9}] scalar | "a scalar" (2 rows) \c :provider_dsn -- Filter may refer to not-replicated columns SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, columns := ARRAY['id', 'data'], row_filter := $rf$other = 2$rf$); replication_set_add_table --------------------------- t (1 row) INSERT INTO basic_dml(other, data, "SomeThing") VALUES (2, 'itstwo', '1 second'::interval); SELECT other, data, "SomeThing" FROM basic_dml WHERE data = 'itstwo'; other | data | SomeThing -------+--------+----------- 2 | itstwo | @ 1 sec (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn -- 'other' will be 
NULL as it wasn't in the repset -- even though we filtered on it. So will SomeThing. SELECT other, data, "SomeThing" FROM basic_dml WHERE data = 'itstwo'; other | data | SomeThing -------+--------+----------- | itstwo | (1 row) \c :provider_dsn --------------------------------------------------- -- Enhanced function tests covering basic plpgsql --------------------------------------------------- CREATE FUNCTION func_plpgsql_simple(arg integer) RETURNS integer LANGUAGE plpgsql AS $$ BEGIN RETURN arg; END; $$; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ func_plpgsql_simple(other) = 100 $rf$); replication_set_add_table --------------------------- t (1 row) -- Should FAIL due to dependency -- -- FIXME: Succeeds incorrectly (RM#5880) leading to -- cache lookup failed for function" errors in logs if allowed to commit -- BEGIN; DROP FUNCTION func_plpgsql_simple(integer); ROLLBACK; INSERT INTO basic_dml (other) VALUES (100), (101); SELECT other FROM basic_dml WHERE other IN (100,101); other ------- 100 101 (2 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT other FROM basic_dml WHERE other IN (100,101); other ------- 100 (1 row) \c :provider_dsn CREATE FUNCTION func_plpgsql_logic(arg integer) RETURNS integer LANGUAGE plpgsql AS $$ BEGIN IF arg = 200 THEN RETURN arg; ELSE RETURN 0; END IF; END; $$; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ func_plpgsql_logic(other) = other $rf$); replication_set_add_table --------------------------- t (1 row) INSERT INTO basic_dml (other) VALUES 
(200), (201); SELECT other FROM basic_dml WHERE other IN (200,201); other ------- 200 201 (2 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT other FROM basic_dml WHERE other IN (200,201); other ------- 200 (1 row) \c :provider_dsn CREATE FUNCTION func_plpgsql_security_definer(arg integer) RETURNS integer LANGUAGE plpgsql SECURITY DEFINER AS $$ BEGIN RAISE NOTICE 'c_u: %, s_u: %', current_user, session_user; RETURN arg; END; $$; CREATE ROLE temp_owner; ALTER FUNCTION func_plpgsql_security_definer(integer) OWNER TO temp_owner; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ func_plpgsql_security_definer(other) = 300 $rf$); replication_set_add_table --------------------------- t (1 row) INSERT INTO basic_dml (other) VALUES (300), (301); SELECT other FROM basic_dml WHERE other IN (300,301); other ------- 300 301 (2 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT other FROM basic_dml WHERE other IN (300,301); other ------- 300 (1 row) \c :provider_dsn CREATE FUNCTION func_plpgsql_exception(arg integer) RETURNS integer LANGUAGE plpgsql AS $$ BEGIN BEGIN SELECT arg/0; EXCEPTION WHEN division_by_zero THEN RETURN arg; END; RAISE EXCEPTION 'should be unreachable'; END; $$; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ func_plpgsql_exception(other) = 400 $rf$); replication_set_add_table --------------------------- t (1 row) INSERT INTO basic_dml (other) VALUES (400), (401); SELECT other FROM basic_dml 
WHERE other IN (400,401); other ------- 400 401 (2 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT other FROM basic_dml WHERE other IN (400,401); other ------- 400 (1 row) \c :provider_dsn -- Should not be able to use a SETOF or TABLE func directly -- but we can do it via a wrapper: CREATE FUNCTION func_plpgsql_srf_retq(arg integer) RETURNS TABLE (result integer, dummy boolean) LANGUAGE plpgsql SECURITY DEFINER AS $$ BEGIN RETURN QUERY SELECT arg * x, true FROM generate_series(1,2) x; RETURN; END; $$; -- fails with SRF context error BEGIN; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ (func_plpgsql_srf_retq(other)).result = 500 $rf$); ERROR: set-returning functions are not allowed in check constraints ROLLBACK; CREATE FUNCTION func_plpgsql_call_set(arg integer) RETURNS boolean LANGUAGE plpgsql AS $$ BEGIN RETURN (SELECT true FROM func_plpgsql_srf_retq(arg) WHERE result = arg * 2); END; $$; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ func_plpgsql_call_set(other) $rf$); replication_set_add_table --------------------------- t (1 row) INSERT INTO basic_dml (other) VALUES (500), (501); SELECT other FROM basic_dml WHERE other IN (500,501); other ------- 500 501 (2 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT other FROM basic_dml WHERE other IN (500,501); other ------- 500 501 (2 rows) \c :provider_dsn DROP FUNCTION func_plpgsql_simple(integer); DROP FUNCTION func_plpgsql_logic(integer); DROP 
FUNCTION func_plpgsql_security_definer(integer); DROP FUNCTION func_plpgsql_exception(integer); DROP FUNCTION func_plpgsql_srf_retq(integer); DROP FUNCTION func_plpgsql_call_set(integer); DROP ROLE temp_owner; --------------------------------------------------- -- ^^^ End plpgsql tests --------------------------------------------------- \c :provider_dsn \set VERBOSITY terse DROP FUNCTION funcn_add(integer, integer); DROP FUNCTION funcn_nochange(text); DROP FUNCTION funcn_get_curr_decade(); SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.basic_dml CASCADE; DROP TABLE public.test_jsonb CASCADE; $$); NOTICE: drop cascades to table public.basic_dml membership in replication set default NOTICE: drop cascades to table public.test_jsonb membership in replication set default replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/row_filter_sampling.out000066400000000000000000000127731415142317000225740ustar00rootroot00000000000000-- row based filtering SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn -- testing volatile sampling function in row_filter SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.test_tablesample (id int primary key, name text) WITH (fillfactor=10); $$); replicate_ddl_command ----------------------- t (1 row) -- use fillfactor so we don't have to load too much data to get multiple pages INSERT INTO test_tablesample SELECT i, repeat(i::text, 200) FROM generate_series(0, 9) s(i); create or replace function funcn_get_system_sample_count(integer, integer) returns bigint as $$ (SELECT count(*) FROM test_tablesample TABLESAMPLE SYSTEM ($1) REPEATABLE ($2)); $$ language sql volatile; create or replace function funcn_get_bernoulli_sample_count(integer, integer) returns bigint as $$ (SELECT count(*) FROM test_tablesample TABLESAMPLE BERNOULLI ($1) REPEATABLE ($2)); $$ language sql volatile; SELECT * FROM pglogical.replication_set_add_table('default', 'test_tablesample', false, row_filter := 
$rf$id > funcn_get_system_sample_count(100, 3) $rf$); replication_set_add_table --------------------------- t (1 row) SELECT * FROM pglogical.replication_set_remove_table('default', 'test_tablesample'); replication_set_remove_table ------------------------------ t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'test_tablesample', true, row_filter := $rf$id > funcn_get_bernoulli_sample_count(10, 0) $rf$); replication_set_add_table --------------------------- t (1 row) SELECT * FROM test_tablesample ORDER BY id limit 5; id | name ----+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 0 | 00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 1 | 11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111 2 | 22222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222 3 | 33333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333 4 | 44444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444 (5 rows) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn BEGIN; SET LOCAL statement_timeout = '10s'; SELECT 
pglogical.wait_for_table_sync_complete('test_subscription', 'test_tablesample'); wait_for_table_sync_complete ------------------------------ (1 row) COMMIT; SELECT sync_kind, sync_nspname, sync_relname, sync_status FROM pglogical.local_sync_status WHERE sync_relname = 'test_tablesample'; sync_kind | sync_nspname | sync_relname | sync_status -----------+--------------+------------------+------------- d | public | test_tablesample | r (1 row) SELECT * FROM test_tablesample ORDER BY id limit 5; id | name ----+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 3 | 33333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333 4 | 44444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444 5 | 55555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555 6 | 66666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666 7 | 77777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777 (5 rows) \c :provider_dsn \set VERBOSITY terse DROP FUNCTION funcn_get_system_sample_count(integer, integer); DROP FUNCTION funcn_get_bernoulli_sample_count(integer, integer); SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.test_tablesample 
CASCADE; $$); NOTICE: drop cascades to table public.test_tablesample membership in replication set default replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/toasted.out000066400000000000000000001334201415142317000201620ustar00rootroot00000000000000-- test toasted data SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn BEGIN; SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.toasted ( id serial primary key, other text, data text NOT NULL ); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'toasted'); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.replicate_ddl_command($$ ALTER TABLE public.toasted ALTER COLUMN data SET STORAGE EXTERNAL; $$); replicate_ddl_command ----------------------- t (1 row) COMMIT; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) -- check replication of toast values INSERT INTO toasted(other, data) VALUES('foo', repeat('1234567890', 300)); -- check that unchanged toast values work correctly UPDATE toasted SET other = 'foo2'; -- check that changed toast values are replicated UPDATE toasted SET other = 'foo3', data = '-'||data; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM toasted ORDER BY id; id | other | data 
----+-------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 1 | foo3 | 
-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 (1 row) \c :provider_dsn \copy toasted from stdin with csv SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM toasted ORDER BY id; id | other | data 
------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------- 1 | foo3 | -123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234
567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 9000 | 
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | bar 9001 | bbb | ccc 9002 | ddd | eee 9003 | bar | 
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 9004 | fff | hhh (6 rows) \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.toasted CASCADE; $$); NOTICE: drop cascades to table public.toasted membership in replication set default replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/expected/triggers.out000066400000000000000000000137601415142317000203510ustar00rootroot00000000000000SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.test_trg_data(id serial primary key, data text); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'test_trg_data'); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn CREATE TABLE test_trg_hist(table_name text, action text, action_id serial, 
original_data text, new_data text); CREATE FUNCTION test_trg_data_hist_fn() RETURNS TRIGGER AS $$ BEGIN IF (TG_OP = 'UPDATE') THEN INSERT INTO test_trg_hist (table_name,action,original_data,new_data) VALUES (TG_TABLE_NAME::TEXT, substring(TG_OP,1,1), ROW(OLD.*), ROW(NEW.*)); RETURN NEW; ELSIF (TG_OP = 'DELETE') THEN INSERT INTO test_trg_hist (table_name,action,original_data) VALUES (TG_TABLE_NAME::TEXT, substring(TG_OP,1,1), ROW(OLD.*)); RETURN OLD; ELSIF (TG_OP = 'INSERT') THEN INSERT INTO test_trg_hist (table_name,action,new_data) VALUES (TG_TABLE_NAME::TEXT, substring(TG_OP,1,1), ROW(NEW.*)); RETURN NEW; ELSE RAISE WARNING 'Unknown action'; RETURN NULL; END IF; END; $$ LANGUAGE plpgsql; CREATE TRIGGER test_trg_data_hist_trg AFTER INSERT OR UPDATE OR DELETE ON test_trg_data FOR EACH ROW EXECUTE PROCEDURE test_trg_data_hist_fn(); \c :provider_dsn INSERT INTO test_trg_data(data) VALUES ('no_history'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM test_trg_data; id | data ----+------------ 1 | no_history (1 row) SELECT * FROM test_trg_hist; table_name | action | action_id | original_data | new_data ------------+--------+-----------+---------------+---------- (0 rows) ALTER TABLE test_trg_data ENABLE REPLICA TRIGGER test_trg_data_hist_trg; \c :provider_dsn INSERT INTO test_trg_data(data) VALUES ('yes_history'); UPDATE test_trg_data SET data = 'yes_history'; DELETE FROM test_trg_data; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn SELECT * FROM test_trg_data; id | data ----+------ (0 rows) SELECT * FROM test_trg_hist; table_name | action | action_id | original_data | new_data ---------------+--------+-----------+-----------------+----------------- test_trg_data | I | 1 | | (2,yes_history) test_trg_data | U | 2 | (1,no_history) | (1,yes_history) test_trg_data | U | 3 | (2,yes_history) | 
(2,yes_history) test_trg_data | D | 4 | (1,yes_history) | test_trg_data | D | 5 | (2,yes_history) | (5 rows) DROP TABLE test_trg_hist CASCADE; \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.basic_dml ( id serial primary key, other integer, data text, something interval ); $$); replicate_ddl_command ----------------------- t (1 row) SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml'); replication_set_add_table --------------------------- t (1 row) SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn -- create row filter trigger CREATE FUNCTION filter_basic_dml_fn() RETURNS TRIGGER AS $$ BEGIN IF (TG_OP in ('UPDATE', 'INSERT')) THEN -- treating 'DELETE' as pass-through IF (NEW.id > 1 AND NEW.data IS DISTINCT FROM 'baz' AND NEW.data IS DISTINCT FROM 'bbb') THEN RETURN NEW; ELSE RETURN NULL; END IF; ELSE RAISE WARNING 'Unknown action'; RETURN NULL; END IF; END; $$ LANGUAGE plpgsql; CREATE TRIGGER filter_basic_dml_trg BEFORE INSERT OR UPDATE ON basic_dml FOR EACH ROW EXECUTE PROCEDURE filter_basic_dml_fn(); \c :provider_dsn -- insert into table at provider \COPY basic_dml FROM STDIN WITH CSV SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn -- rows received at suscriber as trigger is not enabled yet. 
SELECT * from basic_dml ORDER BY id; id | other | data | something ------+-------+------+----------- 5000 | 1 | aaa | @ 1 hour 5001 | 2 | bbb | @ 2 years 5002 | 3 | ccc | @ 3 mins 5003 | 4 | ddd | @ 4 days (4 rows) -- Now enable trigger: ALTER TABLE basic_dml ENABLE REPLICA TRIGGER filter_basic_dml_trg; \c :provider_dsn TRUNCATE basic_dml; -- check basic insert replication INSERT INTO basic_dml(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); wait_slot_confirm_lsn ----------------------- (1 row) \c :subscriber_dsn -- rows filtered at suscriber as trigger is enabled. SELECT * from basic_dml ORDER BY id; id | other | data | something ----+-------+------+----------------- 2 | 4 | bar | @ 84 days 4 | 2 | qux | @ 8 mons 2 days 5 | 1 | | (3 rows) \set VERBOSITY terse DROP FUNCTION test_trg_data_hist_fn() CASCADE; NOTICE: drop cascades to trigger test_trg_data_hist_trg on table test_trg_data DROP FUNCTION filter_basic_dml_fn() CASCADE; NOTICE: drop cascades to trigger filter_basic_dml_trg on table basic_dml \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.test_trg_data CASCADE; DROP TABLE public.basic_dml CASCADE; $$); NOTICE: drop cascades to table public.test_trg_data membership in replication set default NOTICE: drop cascades to table public.basic_dml membership in replication set default replicate_ddl_command ----------------------- t (1 row) pglogical-REL2_4_1/internals-doc/000077500000000000000000000000001415142317000167245ustar00rootroot00000000000000pglogical-REL2_4_1/internals-doc/.gitignore000066400000000000000000000000161415142317000207110ustar00rootroot00000000000000protocol.html pglogical-REL2_4_1/internals-doc/DESIGN.md000066400000000000000000000261601415142317000202240ustar00rootroot00000000000000# Design 
decisions Explanations of why things are done the way they are. ## Why does pglogical_output exist when there's wal2json etc? `pglogical_output` does plenty more than convert logical decoding change messages to a wire format and send them to the client. It handles format negotiations, sender-side filtering using pluggable hooks (and the associated plugin handling), etc. The protocol its self is also important, and incorporates elements like binary datum transfer that can't be easily or efficiently achieved with json. ## Custom binary protocol Why do we have a custom binary protocol inside the walsender / copy both protocol, rather than using a json message representation? Speed and compactness. It's expensive to create json, with lots of allocations. It's expensive to decode it too. You can't represent raw binary in json, and must encode it, which adds considerable overhead for some data types. Using the obvious, easy to decode json representations also makes it difficult to do later enhancements planned for the protocol and decoder, like caching row metadata. The protocol implementation is fairly well encapsulated, so in future it should be possible to emit json instead for clients that request it. Right now that's not the priority as tools like wal2json already exist for that. ## Column metadata The output plugin sends metadata for columns - at minimum, the column names - before each row that first refers to that relation. The reason metadata must be sent is that the upstream and downstream table's attnos don't necessarily correspond. The column names might, and their ordering might even be the same, but any column drop or column type change will result in a dropped column on one side. So at the user level the tables look the same, but their attnos don't match, and if we rely on attno for replication we'll get the wrong data in the wrong columns. Not pretty. 
That could be avoided by requiring that the downstream table be strictly maintained by DDL replication, but: * We don't want to require DDL replication * That won't work with multiple upstreams feeding into a table * The initial table creation still won't be correct if the table has dropped columns, unless we (ab)use `pg_dump`'s `--binary-upgrade` support to emit tables with dropped columns, which we don't want to do. So despite the bandwidth cost, we need to send metadata. Support for type metadata is penciled in to the protocol so that clients that don't have table definitions at all - like queueing engines - can decode the data. That'll also permit type validation sanity checking on the apply side with logical replication. The upstream expects the client to cache this metadata and re-use it when data is sent for the relation again. Cache size controls, an LRU and purge notifications will be added later; for now the client must cache everything indefinitely. ## Relation metadata cache size controls The relation metadata cache will have downstream size control added. The downstream will send a parameter indicating that it supports caching, and the maximum cache size desired. Since there is no downstream-to-upstream communication after the startup params there's no easy way for the downstream to tell the upstream when it purges cache entries. So the downstream cache is a slave cache that must depend strictly on the upstream cache. The downstream tells the upstream how to manage its cache and then after that it just follows orders. To keep the caches in sync so the upstream never sends a row without knowing the downstream has metadata for it cached the downstream must always cache relation metadata when it receives it, and may not purge it from its cache until it receives a purge message for that relation from the upstream. If a new metadata message for the same relation arrives it *must* replace the old entry in the cache. 
The downstream does *not* have to promptly purge or invalidate cache entries when it gets purge messages from the upstream. They are just notifications that the upstream no longer expects the downstream to retain that cache entry and will re-send it if it is required again later. ## Not an extension There's no extension script for pglogical_output. That's by design. We've tried really hard to avoid needing one, allowing applications using pglogical_output to entirely define any SQL level catalogs they need and interact with them using the hooks. That way applications don't have to deal with some of their catalog data being in pglogical_output extension catalogs and some being in their own. There's no issue with dump and restore that way either. The app controls it entirely and pglogical_output doesn't need any policy or tools for it. pglogical_output is meant to be a re-usable component of other solutions. Users shouldn't need to care about it directly. ## Hooks Quite a bit of functionality that could be done directly in the output plugin is instead delegated to pluggable hooks. Replication origin filtering for example. That's because pglogical_output tries hard not to know anything about the topology of the replication cluster and leave that to applications using the plugin. It doesn't ## Hook entry point as a SQL function The hooks entry point is a SQL function that populates a passed `internal` struct with hook function pointers. The reason for this is that hooks are specified by a remote peer over the network. We can't just let the peer say "dlsym() this arbitrary function name and call it with these arguments" for fairly obvious security reasons. At bare minimum all replication using hooks would have to be superuser-only if we did that. The SQL entry point is only called once per decoding session and the rest of the calls are plain C function pointers. 
## The startup reply message The protocol design choices available to `pg_logical` are constrained by being contained in the copy-both protocol within the fe/be protocol, running as a logical decoding plugin. The plugin has no direct access to the network socket and can't send or receive messages whenever it wants, only under the control of the walsender and logical decoding framework. The only opportunity for the client to send data directly to the logical decoding plugin is in the `START_REPLICATION` parameters, and it can't send anything to the client before that point. This means there's no opportunity for a multi-way step negotiation between client and server. We have to do all the negotiation we're going to in a single exchange of messages - the setup parameters and then the replication start message. All the client can do if it doesn't like the offer the server makes is disconnect and try again with different parameters. That's what the startup message is for. It reports the plugin's capabilities and tells the client which requested options were honoured. This gives the client a chance to decide if it's happy with the output plugin's decision or if it wants to reconnect and try again with different options. Iterative negotiation, effectively. ## Unrecognised parameters MUST be ignored by client and server To ensure upward and downward compatibility, the output plugin must ignore parameters set by the client if it doesn't recognise them, and the client must ignore parameters it doesn't recognise in the server's startup reply message. This ensures that older clients can talk to newer servers and vice versa. For this to work, the server must never enable new functionality such as protocol message types, row formats, etc without the client explicitly specifying via a startup parameter that it understands the new functionality. Everything must be negotiated. 
Similarly, a newer client talking to an older server may ask the server to enable functionality, but it can't assume the server will actually honour that request. It must check the server's startup reply message to see if the server confirmed that it enabled the requested functionality. It might choose to disconnect and report an error to the user if the server didn't do what it asked. This can be important, e.g. when a security-significant hook is specified. ## Support for transaction streaming Presently logical decoding requires that a transaction has committed before it can *begin* sending it to the client. This means long running xacts can take 2x as long, since we can't start apply on the replica until the xact is committed on the master. Additionally, a big xact will cause large delays in apply of smaller transactions because logical decoding reorders transactions into strict commit order and replays them in that sequence. Small transactions that committed after the big transaction cannot be replayed to the replica until the big transaction is transferred over the wire, and we can't get a head start on that while it's still running. Finally, the accumulation of a big transaction in the reorder buffer means that storage on the upstream must be sufficient to hold the entire transaction until it can be streamed to the replica and discarded. That is in addition to the copy in retained WAL, which cannot be purged until replay is confirmed past commit for that xact. The temporary copy serves no data safety purpose; it can be regenerated from retained WAL and is just a spool file. There are big upsides to waiting until commit. Rolled-back transactions and subtransactions are never sent at all. The apply/downstream side is greatly simplified by not needing to do transaction ordering, worry about interdependencies and conflicts during apply. 
The commit timestamp is known from the beginning of replay, allowing for smarter conflict resolution behaviour in multi-master scenarios. Nonetheless sometimes we want to be able to stream changes in advance of commit. So we need the ability to start streaming a transaction from the upstream as its changes are seen in WAL, either applying it immediately on the downstream or spooling it on the downstream until it's committed. This requires changes to the logical decoding facilities themselves, it isn't something pglogical_output can do alone. However, we've left room in pglogical_output to support this when support is added to logical decoding: * Flags in most message types let us add fields if we need to, like a HAS_XID flag and an extra field for the transaction ID so we can differentiate between concurrent transactions when streaming. The space isn't wasted the rest of the time. * The upstream isn't allowed to send new message types, etc, without a capability flag being set by the client. So for interleaved xacts we won't enable them in logical decoding unless the client tells us the client is prepared to cope with them by sending additional startup parameters. Note that for consistency reasons we still have to commit things in the same order on the downstream. The purpose of transaction streaming is to reduce the latency between the commit of the last xact before a big one and the first xact after the big one, minimising the duration of the stall in the flow of smaller xacts perceptible on the downstream. Transaction streaming also makes parallel apply on the downstream possible, though it is not necessary to have parallel apply to benefit from transaction streaming. Parallel apply has further complexities that are outside the scope of the output plugin design. 
pglogical-REL2_4_1/internals-doc/OUTPUT.md000066400000000000000000000715741415142317000203240ustar00rootroot00000000000000# `pglogical` Output Plugin This is the [logical decoding](http://www.postgresql.org/docs/current/static/logicaldecoding.html) [output plugin](http://www.postgresql.org/docs/current/static/logicaldecoding-output-plugin.html) for `pglogical`. Its purpose is to extract a change stream from a PostgreSQL database and send it to a client over a network connection using a well-defined, efficient protocol that multiple different applications can consume. The primary purpose of `pglogical_output` is to supply data to logical streaming replication solutions, but any application can potentially use its data stream. The output stream is designed to be compact and fast to decode, and the plugin supports upstream filtering of data (through hooks) so that only the required information is sent. Only one database is replicated, rather than the whole PostgreSQL install. A subset of that database may be selected for replication, currently based on table and on replication origin. Filtering by a WHERE clause can be supported easily in future. No triggers are required to collect the change stream and no external ticker or other daemon is required. The stream of changes is accumulated using [replication slots](http://www.postgresql.org/docs/current/static/logicaldecoding-explanation.html#AEN66446), as supported in PostgreSQL 9.4 or newer, and sent on top of the [PostgreSQL streaming replication protocol](http://www.postgresql.org/docs/current/static/protocol-replication.html). Unlike block-level ("physical") streaming replication, the change stream from `pglogical_output` is compatible across different PostgreSQL versions and can even be consumed by non-PostgreSQL clients. Because logical decoding is used, only the changed rows are sent on the wire. There's no index change data, no vacuum activity, etc transmitted. 
The use of a replication slot means that the change stream is reliable and crash-safe. If the client disconnects or crashes it can reconnect and resume replay from the last message that client processed. Server-side changes that occur while the client is disconnected are accumulated in the queue to be sent when the client reconnects. This reliability also means that server-side resources are consumed whether or not a client is connected. # Why another output plugin? See [`DESIGN.md`](DESIGN.md) for a discussion of why using one of the existing generic logical decoding output plugins like `wal2json` to drive a logical replication downstream isn't ideal. It's mostly about speed. # Architecture and high level interaction The output plugin is loaded by a PostgreSQL walsender process when a client connects to PostgreSQL using the PostgreSQL wire protocol with connection option `replication=database`, then uses [the `CREATE_REPLICATION_SLOT ... LOGICAL ...` or `START_REPLICATION SLOT ... LOGICAL ...` commands](http://www.postgresql.org/docs/current/static/logicaldecoding-walsender.html) to start streaming changes. (It can also be used via [SQL level functions](http://www.postgresql.org/docs/current/static/logicaldecoding-sql.html) over a non-replication connection, but this is mainly for debugging purposes). The client supplies parameters to the `START_REPLICATION SLOT ... LOGICAL ...` command to specify the version of the `pglogical` protocol it supports, whether it wants binary format, etc. The output plugin processes the connection parameters and the connection enters streaming replication protocol mode, sometimes called "COPY BOTH" mode because it's based on the protocol used for the `COPY` command. PostgreSQL then calls functions in this plugin to send it a stream of transactions to decode and translate into network messages. This stream of changes continues until the client disconnects. 
The only client-to-server interaction after startup is the sending of periodic feedback messages that allow the replication slot to discard no-longer-needed change history. The client *must* send feedback, otherwise `pg_xlog` on the server will eventually fill up and the server will stop working. # Usage The overall flow of client/server interaction is: * Client makes PostgreSQL fe/be protocol connection to server * Connection options must include `replication=database` and `dbname=[...]` parameters * The PostgreSQL client library can be `libpq` or anything else that supports the replication sub-protocol * The same mechanisms are used for authentication and protocol encryption as for a normal non-replication connection * Client issues `IDENTIFY_SYSTEM` * Server responds with a single row containing system identity info * Client issues `CREATE_REPLICATION_SLOT slotname LOGICAL 'pglogical'` if it's setting up for the first time * Server responds with success info and a snapshot identifier * Client may at this point use the snapshot identifier on other connections while leaving this one idle * Client issues `START_REPLICATION SLOT slotname LOGICAL 0/0 (...options...)` to start streaming, which loops: * Server emits `pglogical` message block encapsulated in a replication protocol `CopyData` message * Client receives and unwraps message, then decodes the `pglogical` message block * Client intermittently sends a standby status update message to server to confirm replay * ... until client sends a graceful connection termination message on the fe/be protocol level or the connection is broken The details of `IDENTIFY_SYSTEM`, `CREATE_REPLICATION_SLOT` and `START_REPLICATION` are discussed in the [replication protocol docs](http://www.postgresql.org/docs/current/static/protocol-replication.html) and will not be repeated here. 
## Make a replication connection To use the `pglogical` plugin you must first establish a PostgreSQL FE/BE protocol connection using the client library of your choice, passing `replication=database` as one of the connection parameters. `database` is a literal string and is not replaced with the database name; instead the database name is passed separately in the usual `dbname` parameter. Note that `replication` is not a GUC (configuration parameter) and may not be passed in the `options` parameter on the connection, it's a top-level parameter like `user` or `dbname`. Example connection string for `libpq`: 'user=postgres replication=database sslmode=verify-full dbname=mydb' The plug-in name to pass on logical slot creation is `'pglogical'`. Details are in the replication protocol docs. ## Get system identity If required you can use the `IDENTIFY_SYSTEM` command, which reports system information: systemid | timeline | xlogpos | dbname | dboid ---------------------+----------+-----------+--------+------- 6153224364663410513 | 1 | 0/C429C48 | testd | 16385 (1 row) Details are in the replication protocol docs. ## Create the slot if required If your application creates its own slots on first use and hasn't previously connected to this database on this system you'll need to create a replication slot. This keeps track of the client's replay state even while it's disconnected. The slot name may be anything your application wants up to a limit of 63 characters in length. It's strongly advised that the slot name clearly identify the application and the host it runs on. Pass `pglogical` as the plugin name. e.g. CREATE_REPLICATION_SLOT "reporting_host_42" LOGICAL "pglogical"; `CREATE_REPLICATION_SLOT` returns a snapshot identifier that may be used with [`SET TRANSACTION SNAPSHOT`](http://www.postgresql.org/docs/current/static/sql-set-transaction.html) to see the database's state as of the moment of the slot's creation. 
The first change streamed from the slot will be the change immediately after this snapshot was taken. The snapshot is useful when cloning the initial state of a database being replicated. Applications that want to see the change stream going forward, but don't care about the initial state, can ignore this. The snapshot is only valid as long as the connection that issued the `CREATE_REPLICATION_SLOT` remains open and has not run another command. ## Send replication parameters The client now sends: START_REPLICATION SLOT "the_slot_name" LOGICAL ( 'Expected_encoding', 'UTF8', 'Max_proto_major_version', '1', 'Min_proto_major_version', '1', ...moreparams... ); to start replication. The parameters are very important for ensuring that the plugin accepts the replication request and streams changes in the expected form. `pglogical` parameters are discussed in the separate `pglogical` protocol documentation. ## Process the startup message `pglogical_output` will send a `CopyData` message containing its startup message as the first protocol message. This message contains a set of key/value entries describing the capabilities of the upstream output plugin, its version and the Pg version, the tuple format options selected, etc. The downstream client may choose to cleanly close the connection and disconnect at this point if it doesn't like the reply. It might then inform the user or reconnect with different parameters based on what it learned from the first connection's startup message. ## Consume the change stream `pglogical_output` now sends a continuous series of `CopyData` protocol messages, each of which encapsulates a `pglogical` protocol message as documented in the separate protocol docs. These messages provide information about transaction boundaries, changed rows, etc. The stream continues until the client disconnects, the upstream server is restarted, the upstream walsender is terminated by admin action, there's a network issue, or the connection is otherwise broken. 
The client should send periodic feedback messages to the server to acknowledge that it's replayed to a given point and let the server release the resources it's holding in case that change stream has to be replayed again. See ["Hot standby feedback message" in the replication protocol docs](http://www.postgresql.org/docs/current/static/protocol-replication.html) for details. ## Disconnect gracefully Disconnection works just like any normal client; you use your client library's usual method for closing the connection. No special action is required before disconnection, though it's usually a good idea to send a final standby status message just before you disconnect. # Tests There are two sets of tests bundled with `pglogical_output`: the `pg_regress` regression tests and some custom Python tests for the protocol. The `pg_regress` tests check invalid parameter handling and basic functionality. They're intended for use by the buildfarm using an in-tree `make check`, but may also be run with an out-of-tree PGXS build against an existing PostgreSQL install using `make clean installcheck`. The Python tests are more comprehensive, and examine the data sent by the extension at the protocol level, validating the protocol structure, order and contents. They can run using the SQL-level logical decoding interface or, with a psycopg2 containing https://github.com/psycopg/psycopg2/pull/322, with the walsender / streaming replication protocol. The Python-based tests exercise the internal binary format support, too. See `test/README.md` for details. The tests may fail on installations that are not utf-8 encoded because the payloads of the binary protocol output will have text in different encodings, which aren't visible to psql as text to be decoded. Avoiding anything except 7-bit ascii in the tests *should* prevent the problem. 
# Changeset forwarding It's possible to use `pglogical_output` to cascade replication between multiple PostgreSQL servers, in combination with an appropriate client to apply the changes to the downstreams. There are two forwarding modes: * Forward everything. Transactions are replicated whether they were made directly on the immediate upstream or some other node upstream of it. This is the only option when running on 9.4. All rows from transactions are sent. Selected by not setting a row or transaction filter hook. * Filtered forwarding. Transactions are replicated unless a client-supplied transaction filter hook says to skip this transaction. Row changes are replicated unless the client-supplied row filter hook (if provided) says to skip that row. Selected by installing a transaction and/or row filter hook (see "hooks"). If the upstream server is 9.5 or newer the server will enable changeset origin information. It will set `forward_changeset_origins` to true in the startup reply message to indicate this. It will then send changeset origin messages after the `BEGIN` for each transaction, per the protocol documentation. Origin messages are omitted for transactions originating directly on the immediate upstream to save bandwidth. If `forward_changeset_origins` is true then transactions without an origin are always from the immediate upstream that’s running the decoding plugin. Note that 9.4 servers lack replication origin information and won't send it on the wire. They also always pass zeroes to the hooks. So you can't filter by origin in 9.4, and thus can't do mutual multi-master as it'll create an infinite loop. Clients may use this facility to form arbitrarily complex topologies when combined with hooks to determine which transactions are forwarded. An obvious case is bi-directional (mutual) replication. # Selective replication By specifying a row filter hook it's possible to filter the replication stream server-side so that only a subset of changes is replicated. 
# Hooks `pglogical_output` exposes a number of extension points where applications can modify or override its behaviour. All hooks are called in a memory context that lasts for the duration of the logical decoding session. They may switch to longer lived contexts if needed, but are then responsible for their own cleanup. ## Hook setup function The downstream must specify the fully-qualified name of a SQL-callable function on the server as the value of the `hooks.setup_function` client parameter. The SQL signature of this function is CREATE OR REPLACE FUNCTION funcname(hooks internal, memory_context internal) RETURNS void STABLE LANGUAGE c AS 'MODULE_PATHNAME'; Permissions are checked. This function must be callable by the user that the output plugin is running as. The function name *must* be schema-qualified and is parsed like any other qualified identifier. The function receives a pointer to a newly allocated structure of hook function pointers to populate as its first argument. The function must not free the argument. If the hooks need a private data area to store information across calls, the setup function should get the `MemoryContext` pointer from the 2nd argument, then `MemoryContextAlloc` a struct for the data in that memory context and store the pointer to it in `hooks->hooks_private_data`. This will then be accessible on future calls to hook functions. It need not be manually freed, as the memory context used for logical decoding will free it when it's freed. Don't put anything in it that needs manual cleanup. Hooks other than the hook setup function and the startup hook are called in a short-lived memory context. If they want to preserve anything they allocate after the hook returns they must switch to the memory context that was passed to the setup function and allocate it there. Each hook has its own C signature (defined below) and the pointers must be directly to the functions. Hooks that the client does not wish to set must be left null. 
An example is provided in `examples/hooks` and the argument structs are defined in `pglogical_output/hooks.h`, which is installed into the PostgreSQL source tree when the extension is installed. Each hook that is enabled results in a new startup parameter being emitted in the startup reply message. Clients must check for these and must not assume a hook was successfully activated because no error is seen. Hook functions are called in the context of the backend doing logical decoding. Except for the startup hook, hooks see the catalog state as it was at the time the transaction or row change being examined was made. Access to non-catalog tables is unsafe unless they have the `user_catalog_table` reloption set. Among other things this means that it's not safe to invoke arbitrary functions, user-defined procedures, etc, from hooks. ## Startup hook The startup hook is called when logical decoding starts. This hook can inspect the parameters passed by the client to the output plugin as in_params. These parameters *must not* be modified. It can add new parameters to the set to be returned to the client in the startup parameters message, by appending to List out_params, which is initially NIL. Each element must be a `DefElem` with the param name as the `defname` and a `String` value as the arg, as created with `makeDefElem(...)`. It and its contents must be allocated in the logical decoding memory context. For walsender based decoding the startup hook is called only once, and cleanup might not be called at the end of the session. Multiple decoding sessions, and thus multiple startup hook calls, may happen in a session if the SQL interface for logical decoding is being used. In that case it's guaranteed that the cleanup hook will be called between each startup. When successfully enabled, the output parameter `hooks.startup_hook_enabled` is set to true in the startup reply message. 
Unlike the other hooks, this hook sees a snapshot of the database's current state, not a time-traveled catalog state. It is safe to access all tables from this hook. Also unlike other hooks, the startup hook is called in a memory context with the same lifetime of the decoding session. It's called in the same context as the one that's passed to the hook setup hook. ## Transaction filter hook The transaction filter hook can exclude entire transactions from being decoded and replicated based on the node they originated from. It is passed a `const TxFilterHookArgs *` containing: * The hook argument supplied by the client, if any * The `RepOriginId` that this transaction originated from and must return boolean, where true retains the transaction for sending to the client and false discards it. (Note that this is the reverse sense of the low level logical decoding transaction filter hook). The hook function must *not* free the argument struct or modify its contents. The transaction filter hook is only called on PostgreSQL 9.5 and above. It is ignored on 9.4. Note that individual changes within a transaction may have different origins to the transaction as a whole; see "Origin filtering" for more details. If a transaction is filtered out, all changes are filtered out even if their origins differ from that of the transaction as a whole. When successfully enabled, the output parameter `hooks.transaction_filter_enabled` is set to true in the startup reply message. Memory allocated in this hook is freed at the end of the call. ## Row filter hook The row filter hook is called for each row. It is passed information about the table, the transaction origin, and the row origin. It is passed a `const RowFilterHookArgs*` containing: * The hook argument supplied by the client, if any * The `Relation` the change affects * The change type - 'I'nsert, 'U'pdate or 'D'elete It can return true to retain this row change, sending it to the client, or false to discard it. 
The function *must not* free the argument struct or modify its contents. Note that it is more efficient to exclude whole transactions with the transaction filter hook rather than filtering out individual rows. When successfully enabled, the output parameter `hooks.row_filter_enabled` is set to true in the startup reply message. Memory allocated in this hook is freed at the end of the call. ## Shutdown hook The shutdown hook is called when a decoding session ends. You can't rely on this hook being invoked reliably, since a replication-protocol walsender-based session might just terminate. It's mostly useful for cleanup to handle repeated invocations under the SQL interface to logical decoding. You don't need a hook to free memory you allocated, unless you explicitly switched to a longer lived memory context like `TopMemoryContext`. Memory allocated in the hook context will be automatically freed when the decoding session shuts down. # Limitations The advantages of logical decoding in general and `pglogical_output` in particular are discussed above. There are also some limitations that apply to `pglogical_output`, and to Pg's logical decoding in general. Notably: ## Mostly one-way communication Per the protocol documentation, the downstream can't send anything except replay progress messages to the upstream after replication begins, and can't re-initialise replication without a disconnect. To achieve downstream-to-upstream communication, clients can use a regular libpq connection to the upstream then write to tables or call functions. Alternately, a separate replication connection in the opposite direction can be created by the application to carry information from downstream to upstream. See "Protocol flow" in the protocol documentation for more information. ## Doesn't replicate global objects/shared catalog changes PostgreSQL has a number of object types that exist across all databases, stored in *shared catalogs*. 
These include: * Roles (users/groups) * Security labels on users and databases Such objects cannot be replicated by `pglogical_output`. They're managed with DDL that can't be captured within a single database and isn't decoded anyway. Global object changes must be synchronized via some external means. ## Physical replica failover Logical decoding cannot follow a physical replication failover because replication slot state is not replicated to physical replicas. If you fail over to a streaming replica you have to manually reconnect your logical replication clients, creating new slots, etc. This is a core PostgreSQL limitation. Also, there's no built-in way to guarantee that the logical replication slot from the failed master hasn't replayed further than the physical streaming replica you failed over to. You could receive changes on your logical decoding stream from the old master that never made it to the physical streaming replica. This is true (albeit very unlikely) *even if the physical streaming replica is synchronous* because PostgreSQL sends the replication data anyway, then just delays the commit's visibility on the master. Support for strictly ordered standbys would be required in PostgreSQL to avoid this. To achieve failover with logical replication you cannot mix in physical standbys. The logical replication client has to take responsibility for maintaining slots on logical replicas intended as failover candidates and for ensuring that the furthest-ahead replica is promoted if there is more than one. ## Can only replicate complete transactions Logical decoding can only replicate a transaction after it has committed. This usefully skips replication of rolled back transactions, but it also means that very large transactions must be completed upstream before they can begin on the downstream, adding to replication latency. 
## Replicates only one transaction at a time Logical decoding serializes transactions in commit order, so pglogical_output cannot replay interleaved concurrent transactions. This can lead to high latencies when big transactions are being replayed, since smaller transactions get queued up behind them. ## Unique index required for inserts or updates To replicate `INSERT`s or `UPDATE`s it is necessary to have a `PRIMARY KEY` or a (non-partial, columns-only) `UNIQUE` index on the table, so the table has a `REPLICA IDENTITY`. Without that `pglogical_output` doesn't know what old key to send to allow the receiver to tell which tuple is being updated. ## UNLOGGED tables aren't replicated Because `UNLOGGED` tables aren't written to WAL, they aren't replicated by logical or physical replication. You can only replicate `UNLOGGED` tables with trigger-based solutions. ## Unchanged fields are often sent in `UPDATE` Because there's no tracking of dirty/clean fields when a tuple is updated, logical decoding can't tell if a given field was changed by an update. Unchanged fields can only be identified and omitted if they're a variable length TOASTable type and are big enough to get stored out-of-line in a TOAST table. # Troubleshooting and debugging ## Non-destructively previewing pending data on a slot Using the json mode of `pglogical_output` you can examine pending transactions on a slot without consuming them, so they are still delivered to the usual client application that created/owns this slot. This is best done using the SQL interface to logical decoding, since it gives you finer control than using `pg_recvlogical`. You can only peek at a slot while there is no other client connected to that slot. Use `pg_logical_slot_peek_changes` to examine the change stream without destructively consuming changes. This is extremely helpful when trying to determine why an error occurs in a downstream, since you can examine a transaction in json (rather than binary) format. 
It's necessary to supply a minimal set of required parameters to the output plugin. e.g. given setup: CREATE TABLE discard_test(blah text); SELECT 'init' FROM pg_create_logical_replication_slot('demo_slot', 'pglogical_output'); INSERT INTO discard_test(blah) VALUES('one'); INSERT INTO discard_test(blah) VALUES('two1'),('two2'),('two3'); INSERT INTO discard_test(blah) VALUES('three1'),('three2'); you can peek at the change stream with: SELECT location, xid, data FROM pg_logical_slot_peek_changes('demo_slot', NULL, NULL, 'min_proto_version', '1', 'max_proto_version', '1', 'startup_params_format', '1', 'proto_format', 'json'); The two `NULL`s mean you don't want to stop decoding after any particular LSN or any particular number of changes. Decoding will stop when there's nothing left to decode or you cancel the query. This will emit a key/value startup message then change data rows like: location | xid | data 0/4E8AAF0 | 5562 | {"action":"B", has_catalog_changes:"f", xid:"5562", first_lsn:"0/4E8AAF0", commit_time:"2015-11-13 14:26:21.404425+08"} 0/4E8AAF0 | 5562 | {"action":"I","relation":["public","discard_test"],"newtuple":{"blah":"one"}} 0/4E8AB70 | 5562 | {"action":"C", final_lsn:"0/4E8AB30", end_lsn:"0/4E8AB70"} 0/4E8ABA8 | 5563 | {"action":"B", has_catalog_changes:"f", xid:"5563", first_lsn:"0/4E8ABA8", commit_time:"2015-11-13 14:26:32.015611+08"} 0/4E8ABA8 | 5563 | {"action":"I","relation":["public","discard_test"],"newtuple":{"blah":"two1"}} 0/4E8ABE8 | 5563 | {"action":"I","relation":["public","discard_test"],"newtuple":{"blah":"two2"}} 0/4E8AC28 | 5563 | {"action":"I","relation":["public","discard_test"],"newtuple":{"blah":"two3"}} 0/4E8ACA8 | 5563 | {"action":"C", final_lsn:"0/4E8AC68", end_lsn:"0/4E8ACA8"} .... The output is the LSN (log sequence number) associated with a change, the top level transaction ID that performed the change, and the change data as json. 
You can see the transaction boundaries by xid changes and by the "B"egin and "C"ommit messages, and you can see the individual row "I"nserts. Replication origins, commit timestamps, etc will be shown if known. See http://www.postgresql.org/docs/current/static/functions-admin.html for information on the peek functions. If you want the binary format you can get that with `pg_logical_slot_peek_binary_changes` and the `native` protocol, but that's generally much less useful. # Manually discarding a change from a slot Sometimes it's desirable to manually purge one or more changes from a replication slot. This is usually an error recovery step when problems arise with the downstream code that's replaying from the slot. You can use the peek functions to determine the point in the stream you want to discard up to, as identified by LSN (log sequence number). See "non-destructively previewing pending data on a slot" above for details. You can't control the point you start discarding from, it's always from the current stream position up to a point you specify. If the peek shows that there's data you still want to retain you must make sure that the downstream replays up to the point you want to keep changes and sends replay confirmation. In other words there's no way to cut a sequence of changes out of the middle of the pending change stream. Once you've peeked the stream and know the LSN you want to discard up to, you can use `pg_logical_slot_get_changes`, specifying an `upto_lsn`, to consume changes from the slot up to but not including that point. That will be the point at which replay resumes. For example, if you wanted to discard the first transaction in the example from the section above, i.e. 
discard xact 5562 and start decoding at xact 5563 from its' BEGIN lsn `0/4E8ABA8`, you'd run: SELECT location, xid, data FROM pg_logical_slot_get_changes('demo_slot', '0/4E8ABA8', NULL, 'min_proto_version', '1', 'max_proto_version', '1', 'startup_params_format', '1', 'proto_format', 'json'); Note that `_get_changes` is used instead of `_peek_changes` and that the `upto_lsn` is `'0/4E8ABA8'` instead of `NULL`. pglogical-REL2_4_1/internals-doc/protocol.txt000066400000000000000000000702341415142317000213340ustar00rootroot00000000000000= Pg_logical protocol pglogical_output defines a libpq subprocotol for streaming tuples, metadata, etc, from the decoding plugin to receivers. This protocol is an inner layer in a stack: * tcp or unix sockets ** libpq protocol *** libpq replication subprotocol (COPY BOTH etc) **** pg_logical output plugin => consumer protocol so clients can simply use libpq's existing replication protocol support, directly or via their libpq-wrapper driver. This is a binary protocol intended for compact representation. `pglogical_output` also supports a json-based text protocol with json representations of the same changesets, supporting all the same hooks etc, intended mainly for tracing/debugging/diagnostics. That protocol is not discussed here. == ToC == Protocol flow The protocol flow is primarily from upstream walsender/decoding plugin to the downstream receiver. The only information the flows downstream-to-upstream is: * The initial parameter list sent to `START_REPLICATION`; and * replay progress messages We can accept an arbitrary list of params to `START_REPLICATION`. After that we have no general purpose channel for information to flow upstream. That means we can't do a multi-step negotiation/handshake for determining the replication options to use, binary protocol, etc. 
The main form of negotiation is the client getting a "take it or leave it" set of settings from the server in an initial startup message sent before any replication data (see below) and, if it doesn't like them, reconnecting with different startup options. Except for the negotiation via initial parameter list and then startup message the protocol flow is the same as any other walsender-based logical replication plugin. The data stream is sent in COPY BOTH mode as a series of CopyData messages encapsulating replication data, and ends when the client disconnects. There's no facility for ending the COPY BOTH mode and returning to the walsender command parser to issue new commands. This is a limitation of the walsender interface, not pglogical_output. == Protocol messages The individual protocol messages are discussed in the following sub-sections. Protocol flow and logic comes in the next major section. Absolutely all top-level protocol messages begin with a message type byte. While represented in code as a character, this is a signed byte with no associated encoding. Since the PostgreSQL libpq COPY protocol supplies a message length there’s no need for top-level protocol messages to embed a length in their header. === BEGIN message A stream of rows starts with a `BEGIN` message. Rows may only be sent after a `BEGIN` and before a `COMMIT`. |=== |*Message*|*Type/Size*|*Notes* |Message type|signed char|Literal ‘**B**’ (0x42) |flags|uint8| * 0-3: Reserved, client _must_ ERROR if set and not recognised. |lsn|uint64|“final_lsn” in decoding context - currently it means lsn of commit |commit time|uint64|“commit_time” in decoding context |remote XID|uint32|“xid” in decoding context |=== === Forwarded transaction origin message The message after the `BEGIN` may be a _forwarded transaction origin_ message indicating what upstream node the transaction came from. 
Sent if the immediately prior message was a `BEGIN` message, the upstream transaction was forwarded from another node, and replication origin forwarding is enabled, i.e. `forward_changeset_origins` is `t` in the startup reply message. A "node" could be another host, another DB on the same host, or pretty much anything. Whatever origin name is found gets forwarded. The origin identifier is of arbitrary and application-defined format. Applications _should_ prefix their origin identifier with a fixed application name part, like `bdr_`, `myapp_`, etc. It is application-defined what an application does with forwarded transactions from other applications. An origin message with a zero-length origin name indicates that the origin could not be identified but was (probably) not the local node. It is client-defined what action is taken in this case. It is a protocol error to send/receive a forwarded transaction origin message at any time other than immediately after a `BEGIN` message. The origin identifier is typically closely related to replication slot names and replication origins’ names in an application system. For more detail see _Changeset Forwarding_ in the README. |=== |*Message*|*Type/Size*|*Notes* |Message type|signed char|Literal ‘**O**’ (0x4f) |flags|uint8| * 0-3: Reserved, application _must_ ERROR if set and not recognised |origin_lsn|uint64|Log sequence number (LSN, XLogRecPtr) of the transaction’s commit record on its origin node (as opposed to the forwarding node’s commit LSN, which is ‘lsn’ in the BEGIN message) |origin_identifier_length|uint8|Length in bytes of origin_identifier |origin_identifier|signed char[origin_identifier_length]|An origin identifier of arbitrary, upstream-application-defined structure. _Should_ be text in the same encoding as the upstream database. NULL-terminated. _Should_ be 7-bit ASCII. |=== === COMMIT message A stream of rows ends with a `COMMIT` message. 
There is no `ROLLBACK` message because aborted transactions are not sent by the upstream. |=== |*Message*|*Type/Size*|*Notes* |Message type|signed char|Literal ‘**C**’ (0x43) |Flags|uint8| * 0-3: Reserved, client _must_ ERROR if set and not recognised |Commit LSN|uint64|commit_lsn in decoding commit decode callback. This is the same value as in the BEGIN message, and marks the end of the transaction. |End LSN|uint64|end_lsn in decoding transaction context |Commit time|uint64|commit_time in decoding transaction context |=== === INSERT, UPDATE or DELETE message After a `BEGIN` or metadata message, the downstream should expect to receive zero or more row change messages, composed of an insert/update/delete message with zero or more tuple fields, each of which has one or more tuple field values. The row’s relidentifier _must_ match that of the most recently preceding metadata message. All consecutive row messages must currently have the same relidentifier. (_Later extensions to add metadata caching will relax these requirements for clients that advertise caching support; see the documentation on metadata messages for more detail_). It is an error to decode rows using metadata received after the row was received, or using metadata that is not the most recently received metadata revision that still predates the row. I.e. in the sequence M1, R1, R2, M2, R3, M4: R1 and R2 must be decoded using M1, and R3 must be decoded using M2. It is an error to use M4 to decode any of the rows, to use M1 to decode R3, or to use M2 to decode R1 and R2. Row messages _may not_ arrive except during a transaction as delimited by `BEGIN` and `COMMIT` messages. It is an error to receive a row message outside a transaction. Any unrecognised tuple type or tuple part type is an error on the downstream that must result in a client disconnect and error message. Downstreams are expected to negotiate compatibility, and upstreams must not add new tuple types or tuple field types without negotiation. 
The downstream reads rows until the next non-row message is received. There is no other end marker or any indication of how many rows to expect in a sequence. ==== Row message header |=== |*Message*|*Type/Size*|*Notes* |Message type|signed char|Literal ‘**I**’nsert (0x49), ‘**U**’pdate’ (0x55) or ‘**D**’elete (0x44) |flags|uint8|Row flags (reserved) |relidentifier|uint32|relidentifier that matches the table metadata message sent for this row. (_Not present in BDR, which sends nspname and relname instead_) |[tuple parts]|[composite]| |=== One or more tuple-parts fields follow. ==== Tuple fields |=== |Tuple type|signed char|Identifies the kind of tuple being sent. |tupleformat|signed char|‘**T**’ (0x54) |natts|uint16|Number of fields sent in this tuple part. (_Present in BDR, but meaning significantly different here)_ |[tuple field values]|[composite]| |=== ===== Tuple tupleformat compatibility Unrecognised _tupleformat_ kinds are a protocol error for the downstream. ==== Tuple field value fields These message parts describe individual fields within a tuple. There are two kinds of tuple value fields, abbreviated and full. Which is being read is determined based on the first field, _kind_. Abbreviated tuple value fields are nothing but the message kind: |=== |*Message*|*Type/Size*|*Notes* |kind|signed char| * ‘**n**’ull (0x6e) field |=== Full tuple value fields have a length and datum: |=== |*Message*|*Type/Size*|*Notes* |kind|signed char| * ‘**i**’nternal binary (0x62) field |length|int4|Only defined for kind = i\|b\|t |data|[length]|Data in a format defined by the table metadata and column _kind_. |=== ===== Tuple field values kind compatibility Unrecognised field _kind_ values are a protocol error for the downstream. The downstream may not continue processing the protocol stream after this point**.** The upstream may not send ‘**i**’nternal or ‘**b**’inary format values to the downstream without the downstream negotiating acceptance of such values. 
The downstream will also generally negotiate to receive type information to use to decode the values. See the section on startup parameters and the startup message for details. === Table/row metadata messages Before sending changed rows for a relation, a metadata message for the relation must be sent so the downstream knows the namespace, table name, column names, optional column types, etc. A relidentifier field, an arbitrary numeric value unique for that relation on that upstream connection, maps the metadata to following rows. A client should not assume that relation metadata will be followed immediately (or at all) by rows, since future changes may lead to metadata messages being delivered at other times. Metadata messages may arrive during or between transactions. The upstream may not assume that the downstream retains more metadata than the one most recent table metadata message. This applies across all tables, so a client is permitted to discard metadata for table x when getting metadata for table y. The upstream must send a new metadata message before sending rows for a different table, even if that metadata was already sent in the same session or even same transaction. _This requirement will later be weakened by the addition of client metadata caching, which will be advertised to the upstream with an output plugin parameter._ Columns in metadata messages are numbered from 0 to natts-1, reading consecutively from start to finish. The column numbers do not have to be a complete description of the columns in the upstream relation, so long as all columns that will later have row values sent are described. The upstream may choose to omit columns it doesn’t expect to send changes for in any given series of rows. Column numbers are not necessarily stable across different sets of metadata for the same table, even if the table hasn’t changed structurally. A metadata message may not be used to decode rows received before that metadata message. 
==== Table metadata header |=== |*Message*|*Type/Size*|*Notes* |Message type|signed char|Literal ‘**R**’ (0x52) |flags|uint8| * 0-6: Reserved, client _must_ ERROR if set and not recognised. |relidentifier|uint32|Arbitrary relation id, unique for this upstream. In practice this will probably be the upstream table’s oid, but the downstream can’t assume anything. |nspnamelength|uint8|Length of namespace name (incl. terminating \0) |nspname|signed char[nspnamelength]|Relation namespace (null terminated) |relnamelength|uint8|Length of relation name (incl. terminating \0) |relname|char[relname]|Relation name (null terminated) |attrs block|signed char|Literal: ‘**A**’ (0x41) |natts|uint16|number of attributes |[fields]|[composite]|Sequence of ‘natts’ column metadata blocks, each of which begins with a column delimiter followed by zero or more column metadata blocks, each with the same column metadata block header. This chunked format is used so that new metadata messages can be added without breaking existing clients. |=== ==== Column delimiter Each column’s metadata begins with a column metadata header. This comes immediately after the natts field in the table metadata header or after the last metadata block in the prior column. It has the same char header as all the others, and the flags field is the same size as the length field in other blocks, so it’s safe to read this as a column metadata block header. Currently, the only defined flag is 0x1 indicating that the column is part of the relation's identity key. |=== |*Message*|*Type/Size*|*Notes* |blocktype|signed char|‘**C**’ (0x43) - column |flags|uint8|Column info flags |=== ==== Column metadata block header All column metadata blocks share the same header, which is the same length as a column delimiter: |=== |*Message*|*Type/Size*|*Notes* |blocktype|signed char|Identifies the kind of metadata block that follows. |blockbodylength|uint16|Length of block in bytes, excluding blocktype char and length field. 
|=== ==== Column name block This block just carries the name of the column, nothing more. It begins with a column metadata block, and the rest of the message is the column name. |=== |*Message*|*Type/Size*|*Notes* |[column metadata block header]|[composite]|blocktype = ‘**N**’ (0x4e) |colname|char[blockbodylength]|Column name. |=== ==== Column type block T.B.D. Not defined in first protocol revision. Likely to send a type identifier (probably the upstream oid) as a reference to a “type info” protocol message to be delivered before. Then we can cache the type descriptions and avoid repeating long schemas and names, just using the oids. Needs to have room to handle: * built-in core types * extension types (ext version may vary) * enum types (CREATE TYPE … AS ENUM) * range types (CREATE TYPE … AS RANGE) * composite types (CREATE TYPE … AS (...)) * custom types (CREATE TYPE ( input = x_in, output = x_out )) … some of which can be nested == Startup message After processing output plugin arguments, the upstream output plugin must send a startup message as its first message on the wire. It is a trivial header followed by alternating key and value strings represented as null-terminated unsigned char strings. This message specifies the capabilities the output plugin enabled and describes the upstream server and plugin. This may change how the client decodes the data stream, and/or permit the client to disconnect and report an error to the user if the result isn’t acceptable. If replication is rejected because the client is incompatible or the server is unable to satisfy required options, the startup message may be followed by a libpq protocol FATAL message that terminates the session. See “Startup errors” below. The parameter names and values are sent as alternating key/value pairs as null-terminated strings, e.g. 
+“key1\0parameter1\0key2\0value2\0”+ |=== |*Message*|*Type/Size*|*Notes* |Message type|signed char|‘**S**’ (0x53) - startup |Startup message version|uint8|Value is always “1”. |(parameters)|null-terminated key/value pairs|See table below for parameter definitions. |=== === Startup message parameters Since all parameter values are sent as strings, the value types given below specify what the value must be reasonably interpretable as. |=== |*Key name*|*Value type*|*Description* |max_proto_version|integer|Newest version of the protocol supported by output plugin. |min_proto_version|integer|Oldest protocol version supported by server. |proto_format|text|Protocol format requested. native (documented here) or json. Default is native. |coltypes|boolean|Column types will be sent in table metadata. |pg_version_num|integer|PostgreSQL server_version_num of server, if it’s PostgreSQL. e.g. 090400 |pg_version|string|PostgreSQL server_version of server, if it’s PostgreSQL. |pg_catversion|uint32|Version of the PostgreSQL system catalogs on the upstream server, if it’s PostgreSQL. |binary|_set of parameters, specified separately_|See “_the __‘binary’__ parameters_” below, and “_Parameters relating to exchange of binary values_” |database_encoding|string|The native text encoding of the database the plugin is running in |encoding|string|Field values for textual data will be in this encoding in native protocol text, binary or internal representation. For the native protocol this is currently always the same as `database_encoding`. For text-mode json protocol this is always the same as `client_encoding`. |forward_changeset_origins|bool|Tells the client that the server will send changeset origin information. See “_Changeset forwarding_” for details. |no_txinfo|bool|Requests that variable transaction info such as XIDs, LSNs, and timestamps be omitted from output. Mainly for tests. Currently ignored for protos other than json. 
|=== The ‘binary’ parameter set: == |=== |*Key name*|*Value type*|*Description* |binary.internal_basetypes|boolean|If true, PostgreSQL internal binary representations for row field data may be used for some or all row fields, if here the type is appropriate and the binary compatibility parameters of upstream and downstream match. See binary.want_internal_basetypes in the output plugin parameters for details. May only be true if _binary.want_internal_basetypes_ was set to true by the client in the parameters and the client’s accepted binary format matches that of the server. |binary.binary_basetypes|boolean|If true, external binary format (send/recv format) may be used for some or all row field data where the field type is a built-in base type whose send/recv format is compatible with binary.binary_pg_version . May only be set if _binary.want_binary_basetypes_ was set to true by the client in the parameters and the client’s accepted send/recv format matches that of the server. |binary.binary_pg_version|uint16|The PostgreSQL major version that send/recv format values will be compatible with. This is not necessarily the actual upstream PostgreSQL version. |binary.sizeof_int|uint8|sizeof(int) on the upstream. |binary.sizeof_long|uint8|sizeof(long) on the upstream. |binary.sizeof_datum|uint8|Same as sizeof_int, but for the PostgreSQL Datum typedef. |binary.maxalign|uint8|Upstream PostgreSQL server’s MAXIMUM_ALIGNOF value - platform dependent, determined at build time. |binary.bigendian|bool|True iff the upstream is big-endian. |binary.float4_byval|bool|Upstream PostgreSQL’s float4_byval compile option. |binary.float8_byval|bool|Upstream PostgreSQL’s float8_byval compile option. |binary.integer_datetimes|bool|Whether TIME, TIMESTAMP and TIMESTAMP WITH TIME ZONE will be sent using integer or floating point representation. Usually this is the value of the upstream PostgreSQL’s integer_datetimes compile option. 
|=== == Startup errors If the server rejects the client’s connection - due to non-overlapping protocol support, unrecognised parameter formats, unsupported required parameters like hooks, etc - then it will follow the startup reply message with a ++++++normal libpq protocol error message++++++. (Current versions send this before the startup message). == Arguments client supplies to output plugin The one opportunity for the downstream client to send information (other than replay feedback) to the upstream is at connect-time, as an array of arguments to the output plugin supplied to START LOGICAL REPLICATION. There is no back-and-forth, no handshake. As a result, the client mainly announces capabilities and makes requests of the output plugin. The output plugin will ERROR if required parameters are unset, or where incompatibilities that cannot be resolved are found. Otherwise the output plugin reports what it could and could not honour in the startup message it sends as the first message on the wire down to the client. The client chooses whether to continue replay or to disconnect and report an error to the user, then possibly reconnect with different options. === Output plugin arguments The output plugin’s key/value arguments are specified in pairs, as key and value. They’re what’s passed to START_REPLICATION, etc. All parameters are passed in text form. They _should_ be limited to 7-bit ASCII, since the server’s text encoding is not known, but _may_ be normalized precomposed UTF-8. The types specified for parameters indicate what the output plugin should attempt to convert the text into. Clients should not send text values that are outside the range for that type. ==== Capabilities Many values are capabilities flags for the client, indicating that it understands optional features like metadata caching, binary format transfers, etc. In general the output plugin _may_ disregard capabilities the client advertises as supported and act as if they are not supported. 
If a capability is advertised as unsupported or is not advertised the output plugin _must not_ enable the corresponding features. In other words, don’t send the client something it’s not expecting. ==== Protocol versioning Two parameters max_proto_version and min_proto_version, which clients must always send, allow negotiation of the protocol version. The output plugin must ERROR if the client protocol support does not overlap its own protocol support range. The protocol version is only incremented when there are major breaking changes that all or most clients must be modified to accommodate. Most changes are done by adding new optional messages and/or by having clients advertise capabilities to opt in to features. Because these versions are expected to be incremented, to make it clear that the format of the startup parameters themselves haven’t changed, the first key/value pair _must_ be the parameter startup_params_format with value “1”. |=== |*Key*|*Type*|*Value(s)*|*Notes* |startup_params_format|int8|1|The format version of this startup parameter set. Always the digit 1 (0x31), null terminated. |max_proto_version|int32|1|Newest version of the protocol supported by client. Output plugin must ERROR if supported version too old. *Required*, ERROR if missing. |min_proto_version|int32|1|Oldest version of the protocol supported by client. Output plugin must ERROR if supported version too old. *Required*, ERROR if missing. |=== ==== Client requirements and capabilities |=== |*Key*|*Type*|*Default*|*Notes* |expected_encoding|string|null|The text encoding the downstream expects field values to be in. Applies to text, binary and internal representations of field values in native format. Has no effect on other protocol content. If specified, the upstream must honour it. For json protocol, must be unset or match `client_encoding`. (Current plugin versions ERROR if this is set for the native protocol and not equal to the upstream database's encoding). 
|want_coltypes|boolean|false|The client wants to receive data type information about columns. |=== ==== General client information These keys tell the output plugin about the client. They’re mainly for informational purposes. In particular, the versions must _not_ be used to determine compatibility for binary or send/recv format, as non-PostgreSQL clients will simply not send them at all but may still understand binary or send/recv format fields. |=== |*Key*|*Type*|*Default*|*Notes* |pg_version_num|integer|null|PostgreSQL server_version_num of client, if it’s PostgreSQL. e.g. 090400 |pg_version|string|null|PostgreSQL server_version of client, if it’s PostgreSQL. |=== ==== Parameters relating to exchange of binary values The downstream may specify to the upstream that it is capable of understanding binary (PostgreSQL internal binary datum format), and/or send/recv (PostgreSQL binary interchange) format data by setting the binary.want_binary_basetypes and/or binary.want_internal_basetypes options, or other yet-to-be-defined options. An upstream output plugin that does not support one or both formats _may_ ignore the downstream’s binary support and send text format, in which case it may ignore all binary. parameters. All downstreams _must_ support text format. An upstream output plugin _must not_ send binary or send/recv format unless the downstream has announced it can receive it. If both upstream and downstream support both formats an upstream should prefer binary format and fall back to send/recv, then to text, if compatibility requires. Internal and binary format selection should be done on a type-by-type basis. It is quite normal to send ‘text’ format for extension types while sending binary for built-in types. The downstream _must_ specify its compatibility requirements for internal and binary data if it requests either or both formats. 
The upstream _must_ honour these by falling back from binary to send/recv, and from send/recv to text, where the upstream and downstream are not compatible. An unspecified compatibility field _must_ presumed to be unsupported by the downstream so that older clients that don’t know about a change in a newer version don’t receive unexpected data. For example, in the unlikely event that PostgreSQL 99.8 switched to 128-bit DPD (Densely Packed Decimal) representations of NUMERIC instead of the current arbitrary-length BCD (Binary Coded Decimal) format, a new binary.dpd_numerics parameter would be added. Clients that didn’t know about the change wouldn’t know to set it, so the upstream would presume it unsupported and send text format NUMERIC to those clients. This also means that clients that support the new format wouldn’t be able to receive the old format in binary from older servers since they’d specify dpd_numerics = true in their compatibility parameters. At this time a downstream may specify compatibility with only one value for a given option; i.e. a downstream cannot say it supports both 4-byte and 8-byte sizeof(int). Leaving it unspecified means the upstream must assume the downstream supports neither. (A future protocol extension may allow clients to specify alternative sets of supported formats). The `pg_version` option _must not_ be used to decide compatibility. Use `binary.basetypes_major_version` instead. |=== |*Key name*|*Value type*|*Default*|*Description* |binary.want_binary_basetypes|boolean|false|True if the client accepts binary interchange (send/recv) format rows for PostgreSQL built-in base types. |binary.want_internal_basetypes|boolean|false|True if the client accepts PostgreSQL internal-format binary output for base PostgreSQL types not otherwise specified elsewhere. |binary.basetypes_major_version|uint16|null|The PostgreSQL major version (x.y) the downstream expects binary and send/recv format values to be in. 
Represented as an integer in XXYY format (no leading zero since it’s an integer), e.g. 9.5 is 905. This corresponds to PG_VERSION_NUM/100 in PostgreSQL. |binary.sizeof_int|uint8|+null+|sizeof(int) on the downstream. |binary.sizeof_long|uint8|null|sizeof(long) on the downstream. |binary.sizeof_datum|uint8|null|Same as sizeof_int, but for the PostgreSQL Datum typedef. |binary.maxalign|uint8|null|Downstream PostgreSQL server’s maxalign value - platform dependent, determined at build time. |binary.bigendian|bool|null|True iff the downstream is big-endian. |binary.float4_byval|bool|null|Downstream PostgreSQL’s float4_byval compile option. |binary.float8_byval|bool|null|Downstream PostgreSQL’s float8_byval compile option. |binary.integer_datetimes|bool|null|Downstream PostgreSQL’s integer_datetimes compile option. |=== == Extensibility Because of the use of optional parameters in output plugin arguments, and the confirmation/response sent in the startup packet, a basic handshake is possible between upstream and downstream, allowing negotiation of capabilities. The output plugin must never send non-optional data or change its wire format without confirmation from the client that it can understand the new data. It may send optional data without negotiation. When extending the output plugin arguments, add-ons are expected to prefix all keys with the extension name, and should preferably use a single top level key with a json object value to carry their extension information. Additions to the startup message should follow the same pattern. Hooks and plugins can be used to add functionality specific to a client. == JSON protocol If `proto_format` is set to `json` then the output plugin will emit JSON instead of the custom binary protocol. JSON support is intended mainly for debugging and diagnostics. The JSON format supports all the same hooks. 
pglogical-REL2_4_1/pglogical--1.0.0--1.0.1.sql000066400000000000000000000015751415142317000200730ustar00rootroot00000000000000CREATE OR REPLACE FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[] = '{default,default_insert_only,ddl_sql}', synchronize_structure boolean = true, synchronize_data boolean = true, forward_origins text[] = '{all}') RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_subscription'; DO $$ BEGIN IF (SELECT count(1) FROM pglogical.node) > 0 THEN SELECT * FROM pglogical.create_replication_set('ddl_sql', true, false, false, false); END IF; END; $$; UPDATE pglogical.subscription SET sub_replication_sets = array_append(sub_replication_sets, 'ddl_sql'); WITH applys AS ( SELECT sub_name FROM pglogical.subscription WHERE sub_enabled ), disable AS ( SELECT pglogical.alter_subscription_disable(sub_name, true) FROM applys ) SELECT pglogical.alter_subscription_enable(sub_name, true) FROM applys; pglogical-REL2_4_1/pglogical--1.0.0.sql000066400000000000000000000224301415142317000173540ustar00rootroot00000000000000\echo Use "CREATE EXTENSION pglogical" to load this file. 
\quit CREATE TABLE pglogical.node ( node_id oid NOT NULL PRIMARY KEY, node_name name NOT NULL UNIQUE ) WITH (user_catalog_table=true); CREATE TABLE pglogical.node_interface ( if_id oid NOT NULL PRIMARY KEY, if_name name NOT NULL, -- default same as node name if_nodeid oid REFERENCES node(node_id), if_dsn text NOT NULL, UNIQUE (if_nodeid, if_name) ); CREATE TABLE pglogical.local_node ( node_id oid PRIMARY KEY REFERENCES node(node_id), node_local_interface oid NOT NULL REFERENCES node_interface(if_id) ); -- Currently we allow only one node record per database CREATE UNIQUE INDEX local_node_onlyone ON pglogical.local_node ((true)); CREATE TABLE pglogical.subscription ( sub_id oid NOT NULL PRIMARY KEY, sub_name name NOT NULL UNIQUE, sub_origin oid NOT NULL REFERENCES node(node_id), sub_target oid NOT NULL REFERENCES node(node_id), sub_origin_if oid NOT NULL REFERENCES node_interface(if_id), sub_target_if oid NOT NULL REFERENCES node_interface(if_id), sub_enabled boolean NOT NULL DEFAULT true, sub_slot_name name NOT NULL, sub_replication_sets text[], sub_forward_origins text[], UNIQUE (sub_origin, sub_target) ); CREATE TABLE pglogical.local_sync_status ( sync_kind "char" NOT NULL CHECK (sync_kind IN ('i', 's', 'd', 'f')), sync_subid oid NOT NULL REFERENCES pglogical.subscription(sub_id), sync_nspname name, sync_relname name, sync_status "char" NOT NULL, UNIQUE (sync_subid, sync_nspname, sync_relname) ); CREATE FUNCTION pglogical.create_node(node_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_node'; CREATE FUNCTION pglogical.drop_node(node_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_node'; CREATE FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[] = '{default,default_insert_only}', synchronize_structure boolean = true, synchronize_data boolean = true, forward_origins text[] = '{all}') 
RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_subscription'; CREATE FUNCTION pglogical.drop_subscription(subscription_name name, ifexists boolean DEFAULT false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_subscription'; CREATE FUNCTION pglogical.alter_subscription_disable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_disable'; CREATE FUNCTION pglogical.alter_subscription_enable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_enable'; CREATE FUNCTION pglogical.alter_subscription_add_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_add_replication_set'; CREATE FUNCTION pglogical.alter_subscription_remove_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_remove_replication_set'; CREATE FUNCTION pglogical.show_subscription_status(subscription_name name DEFAULT NULL, OUT subscription_name text, OUT status text, OUT provider_node text, OUT provider_dsn text, OUT slot_name text, OUT replication_sets text[], OUT forward_origins text[]) RETURNS SETOF record STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_status'; CREATE TABLE pglogical.replication_set ( set_id oid NOT NULL PRIMARY KEY, set_nodeid oid NOT NULL, set_name name NOT NULL, replicate_insert boolean NOT NULL DEFAULT true, replicate_update boolean NOT NULL DEFAULT true, replicate_delete boolean NOT NULL DEFAULT true, replicate_truncate boolean NOT NULL DEFAULT true, UNIQUE (set_nodeid, set_name) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_table ( set_id integer NOT NULL, set_reloid 
regclass NOT NULL, PRIMARY KEY(set_id, set_reloid) ) WITH (user_catalog_table=true); CREATE VIEW pglogical.TABLES AS WITH set_tables AS ( SELECT s.set_name, t.set_reloid FROM pglogical.replication_set_table t, pglogical.replication_set s, pglogical.local_node n WHERE s.set_nodeid = n.node_id AND s.set_id = t.set_id ), user_tables AS ( SELECT r.oid, n.nspname, r.relname, r.relreplident FROM pg_catalog.pg_class r, pg_catalog.pg_namespace n WHERE r.relkind = 'r' AND r.relpersistence = 'p' AND n.oid = r.relnamespace AND n.nspname !~ '^pg_' AND n.nspname != 'information_schema' AND n.nspname != 'pglogical' ) SELECT n.nspname, r.relname, s.set_name FROM pg_catalog.pg_namespace n, pg_catalog.pg_class r, set_tables s WHERE r.relkind = 'r' AND n.oid = r.relnamespace AND r.oid = s.set_reloid UNION SELECT t.nspname, t.relname, NULL FROM user_tables t WHERE t.oid NOT IN (SELECT set_reloid FROM set_tables); CREATE FUNCTION pglogical.create_replication_set(set_name name, replicate_insert boolean = true, replicate_update boolean = true, replicate_delete boolean = true, replicate_truncate boolean = true) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_replication_set'; CREATE FUNCTION pglogical.alter_replication_set(set_name name, replicate_insert boolean DEFAULT NULL, replicate_update boolean DEFAULT NULL, replicate_delete boolean DEFAULT NULL, replicate_truncate boolean DEFAULT NULL) RETURNS oid CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_replication_set'; CREATE FUNCTION pglogical.drop_replication_set(set_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_replication_set'; CREATE FUNCTION pglogical.replication_set_add_table(set_name name, relation regclass, synchronize boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_table'; CREATE FUNCTION 
pglogical.replication_set_add_all_tables(set_name name, schema_names text[], synchronize boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_tables'; CREATE FUNCTION pglogical.replication_set_remove_table(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_table'; CREATE FUNCTION pglogical.alter_subscription_synchronize(subscription_name name, truncate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_synchronize'; CREATE FUNCTION pglogical.alter_subscription_resynchronize_table(subscription_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_resynchronize_table'; CREATE FUNCTION pglogical.show_subscription_table(subscription_name name, relation regclass, OUT nspname text, OUT relname text, OUT status text) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_table'; CREATE TABLE pglogical.queue ( queued_at timestamp with time zone NOT NULL, role name NOT NULL, replication_sets text[], message_type "char" NOT NULL, message json NOT NULL ); CREATE FUNCTION pglogical.replicate_ddl_command(command text) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replicate_ddl_command'; CREATE OR REPLACE FUNCTION pglogical.queue_truncate() RETURNS trigger LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_queue_truncate'; CREATE OR REPLACE FUNCTION pglogical.truncate_trigger_add() RETURNS event_trigger LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_truncate_trigger_add'; CREATE EVENT TRIGGER pglogical_truncate_trigger_add ON ddl_command_end WHEN TAG IN ('CREATE TABLE', 'CREATE TABLE AS') EXECUTE PROCEDURE pglogical.truncate_trigger_add(); CREATE OR REPLACE FUNCTION pglogical.dependency_check_trigger() RETURNS event_trigger LANGUAGE c AS 
'MODULE_PATHNAME', 'pglogical_dependency_check_trigger'; CREATE EVENT TRIGGER pglogical_dependency_check_trigger ON sql_drop EXECUTE PROCEDURE pglogical.dependency_check_trigger(); CREATE FUNCTION pglogical.pglogical_hooks_setup(internal) RETURNS void STABLE LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.pglogical_node_info(OUT node_id oid, OUT node_name text, OUT sysid text, OUT dbname text, OUT replication_sets text) RETURNS record STABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.pglogical_gen_slot_name(name, name, name) RETURNS name IMMUTABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version() RETURNS text LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version_num() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_max_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_min_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; pglogical-REL2_4_1/pglogical--1.0.1--1.1.0.sql000066400000000000000000000107401415142317000200660ustar00rootroot00000000000000CREATE FUNCTION pglogical.alter_node_add_interface(node_name name, interface_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_add_interface'; CREATE FUNCTION pglogical.alter_node_drop_interface(node_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_drop_interface'; CREATE FUNCTION pglogical.alter_subscription_interface(subscription_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_interface'; DROP FUNCTION pglogical.replicate_ddl_command(command text); CREATE OR REPLACE FUNCTION pglogical.replicate_ddl_command(command text, replication_sets text[] DEFAULT '{ddl_sql}') RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replicate_ddl_command'; DROP 
VIEW pglogical.TABLES; ALTER TABLE pglogical.replication_set_table RENAME TO replication_set_relation; ALTER TABLE pglogical.replication_set_relation ALTER COLUMN set_id TYPE oid; CREATE TABLE pglogical.sequence_state ( seqoid oid NOT NULL PRIMARY KEY, cache_size integer NOT NULL, last_value bigint NOT NULL ) WITH (user_catalog_table=true); CREATE OR REPLACE VIEW pglogical.TABLES AS WITH set_relations AS ( SELECT s.set_name, r.set_reloid FROM pglogical.replication_set_relation r, pglogical.replication_set s, pglogical.local_node n WHERE s.set_nodeid = n.node_id AND s.set_id = r.set_id ), user_tables AS ( SELECT r.oid, n.nspname, r.relname, r.relreplident FROM pg_catalog.pg_class r, pg_catalog.pg_namespace n WHERE r.relkind = 'r' AND r.relpersistence = 'p' AND n.oid = r.relnamespace AND n.nspname !~ '^pg_' AND n.nspname != 'information_schema' AND n.nspname != 'pglogical' ) SELECT n.nspname, r.relname, s.set_name FROM pg_catalog.pg_namespace n, pg_catalog.pg_class r, set_relations s WHERE r.relkind = 'r' AND n.oid = r.relnamespace AND r.oid = s.set_reloid UNION SELECT t.nspname, t.relname, NULL FROM user_tables t WHERE t.oid NOT IN (SELECT set_reloid FROM set_relations); CREATE FUNCTION pglogical.replication_set_add_sequence(set_name name, relation regclass, synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_sequence'; CREATE FUNCTION pglogical.replication_set_add_all_sequences(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_sequences'; CREATE FUNCTION pglogical.replication_set_remove_sequence(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_sequence'; CREATE FUNCTION pglogical.synchronize_sequence(relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 
'MODULE_PATHNAME', 'pglogical_synchronize_sequence'; ALTER EVENT TRIGGER pglogical_truncate_trigger_add ENABLE ALWAYS; ALTER EVENT TRIGGER pglogical_dependency_check_trigger ENABLE ALWAYS; DROP FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[], synchronize_structure boolean, synchronize_data boolean, forward_origins text[]); CREATE FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[] = '{default,default_insert_only,ddl_sql}', synchronize_structure boolean = false, synchronize_data boolean = true, forward_origins text[] = '{all}') RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_subscription'; DROP FUNCTION pglogical.replication_set_add_table(set_name name, relation regclass, synchronize boolean); CREATE FUNCTION pglogical.replication_set_add_table(set_name name, relation regclass, synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_table'; DROP FUNCTION pglogical.replication_set_add_all_tables(set_name name, schema_names text[], synchronize boolean); CREATE FUNCTION pglogical.replication_set_add_all_tables(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_tables'; pglogical-REL2_4_1/pglogical--1.1.0--1.1.1.sql000066400000000000000000000000001415142317000200530ustar00rootroot00000000000000pglogical-REL2_4_1/pglogical--1.1.1--1.1.2.sql000066400000000000000000000000001415142317000200550ustar00rootroot00000000000000pglogical-REL2_4_1/pglogical--1.1.2--1.2.0.sql000066400000000000000000000000001415142317000200550ustar00rootroot00000000000000pglogical-REL2_4_1/pglogical--1.2.0--1.2.1.sql000066400000000000000000000000371415142317000200670ustar00rootroot00000000000000DROP INDEX local_node_onlyone; 
pglogical-REL2_4_1/pglogical--1.2.1--1.2.2.sql000066400000000000000000000000511415142317000200650ustar00rootroot00000000000000DROP INDEX IF EXISTS local_node_onlyone; pglogical-REL2_4_1/pglogical--1.2.2--2.0.0.sql000066400000000000000000000104611415142317000200710ustar00rootroot00000000000000ALTER TABLE pglogical.subscription ADD COLUMN sub_apply_delay interval NOT NULL DEFAULT '0'; CREATE TABLE pglogical.replication_set_seq ( set_id oid NOT NULL, set_seqoid regclass NOT NULL, PRIMARY KEY(set_id, set_seqoid) ) WITH (user_catalog_table=true); WITH seqs AS ( SELECT r.set_id, r.set_reloid FROM pg_class c JOIN replication_set_relation r ON (r.set_reloid = c.oid) WHERE c.relkind = 'S' ), inserted AS ( INSERT INTO replication_set_seq SELECT set_id, set_reloid FROM seqs ) DELETE FROM replication_set_relation r USING seqs s WHERE r.set_reloid = s.set_reloid; ALTER TABLE pglogical.replication_set_relation RENAME TO replication_set_table; ALTER TABLE pglogical.replication_set_table ADD COLUMN set_att_list text[], ADD COLUMN set_row_filter pg_node_tree; DROP FUNCTION pglogical.replication_set_add_table(set_name name, relation regclass, synchronize_data boolean); CREATE FUNCTION pglogical.replication_set_add_table(set_name name, relation regclass, synchronize_data boolean DEFAULT false, columns text[] DEFAULT NULL, row_filter text DEFAULT NULL) RETURNS boolean CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_table'; DROP FUNCTION pglogical.alter_subscription_resynchronize_table(subscription_name name, relation regclass); CREATE FUNCTION pglogical.alter_subscription_resynchronize_table(subscription_name name, relation regclass, truncate boolean DEFAULT true) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_resynchronize_table'; DROP FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[], synchronize_structure boolean, synchronize_data 
boolean, forward_origins text[]); CREATE FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[] = '{default,default_insert_only,ddl_sql}', synchronize_structure boolean = false, synchronize_data boolean = true, forward_origins text[] = '{all}', apply_delay interval DEFAULT '0') RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_subscription'; DROP VIEW pglogical.TABLES; CREATE VIEW pglogical.TABLES AS WITH set_relations AS ( SELECT s.set_name, r.set_reloid FROM pglogical.replication_set_table r, pglogical.replication_set s, pglogical.local_node n WHERE s.set_nodeid = n.node_id AND s.set_id = r.set_id ), user_tables AS ( SELECT r.oid, n.nspname, r.relname, r.relreplident FROM pg_catalog.pg_class r, pg_catalog.pg_namespace n WHERE r.relkind = 'r' AND r.relpersistence = 'p' AND n.oid = r.relnamespace AND n.nspname !~ '^pg_' AND n.nspname != 'information_schema' AND n.nspname != 'pglogical' ) SELECT r.oid AS relid, n.nspname, r.relname, s.set_name FROM pg_catalog.pg_namespace n, pg_catalog.pg_class r, set_relations s WHERE r.relkind = 'r' AND n.oid = r.relnamespace AND r.oid = s.set_reloid UNION SELECT t.oid AS relid, t.nspname, t.relname, NULL FROM user_tables t WHERE t.oid NOT IN (SELECT set_reloid FROM set_relations); CREATE FUNCTION pglogical.show_repset_table_info(relation regclass, repsets text[], OUT relid oid, OUT nspname text, OUT relname text, OUT att_list text[], OUT has_row_filter boolean) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_repset_table_info'; CREATE FUNCTION pglogical.table_data_filtered(reltyp anyelement, relation regclass, repsets text[]) RETURNS SETOF anyelement CALLED ON NULL INPUT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_table_data_filtered'; CREATE TABLE pglogical.depend ( classid oid NOT NULL, objid oid NOT NULL, objsubid integer NOT NULL, refclassid oid NOT NULL, refobjid oid NOT NULL, refobjsubid integer NOT NULL, 
deptype "char" NOT NULL ) WITH (user_catalog_table=true); DROP EVENT TRIGGER IF EXISTS pglogical_truncate_trigger_add; DROP EVENT TRIGGER IF EXISTS pglogical_dependency_check_trigger; DROP FUNCTION IF EXISTS pglogical.truncate_trigger_add(); DROP FUNCTION IF EXISTS pglogical.dependency_check_trigger(); DROP FUNCTION IF EXISTS pglogical_hooks_setup(internal); pglogical-REL2_4_1/pglogical--2.0.0--2.0.1.sql000066400000000000000000000000001415142317000200530ustar00rootroot00000000000000pglogical-REL2_4_1/pglogical--2.0.0--2.1.0.sql000066400000000000000000000002311415142317000200610ustar00rootroot00000000000000CREATE FUNCTION pglogical.wait_slot_confirm_lsn(slotname name, target pg_lsn) RETURNS void LANGUAGE c AS 'pglogical','pglogical_wait_slot_confirm_lsn'; pglogical-REL2_4_1/pglogical--2.0.1--2.1.0.sql000066400000000000000000000002311415142317000200620ustar00rootroot00000000000000CREATE FUNCTION pglogical.wait_slot_confirm_lsn(slotname name, target pg_lsn) RETURNS void LANGUAGE c AS 'pglogical','pglogical_wait_slot_confirm_lsn'; pglogical-REL2_4_1/pglogical--2.1.0--2.1.1.sql000066400000000000000000000003401415142317000200640ustar00rootroot00000000000000ALTER TABLE pglogical.local_sync_status ADD COLUMN sync_statuslsn pg_lsn NULL; UPDATE pglogical.local_sync_status SET sync_statuslsn = '0/0'; ALTER TABLE pglogical.local_sync_status ALTER COLUMN sync_statuslsn SET NOT NULL; pglogical-REL2_4_1/pglogical--2.1.1--2.2.0.sql000066400000000000000000000000001415142317000200560ustar00rootroot00000000000000pglogical-REL2_4_1/pglogical--2.2.0--2.2.1.sql000066400000000000000000000012511415142317000200700ustar00rootroot00000000000000CREATE FUNCTION pglogical.wait_for_subscription_sync_complete(subscription_name name) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_subscription_sync_complete'; CREATE FUNCTION pglogical.wait_for_table_sync_complete(subscription_name name, relation regclass) RETURNS void RETURNS NULL ON NULL INPUT 
VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_table_sync_complete'; CREATE FUNCTION pglogical.xact_commit_timestamp_origin("xid" xid, OUT "timestamp" timestamptz, OUT "roident" oid) RETURNS record RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_xact_commit_timestamp_origin'; pglogical-REL2_4_1/pglogical--2.2.0.sql000066400000000000000000000266771415142317000174000ustar00rootroot00000000000000\echo Use "CREATE EXTENSION pglogical" to load this file. \quit CREATE TABLE pglogical.node ( node_id oid NOT NULL PRIMARY KEY, node_name name NOT NULL UNIQUE ) WITH (user_catalog_table=true); CREATE TABLE pglogical.node_interface ( if_id oid NOT NULL PRIMARY KEY, if_name name NOT NULL, -- default same as node name if_nodeid oid REFERENCES node(node_id), if_dsn text NOT NULL, UNIQUE (if_nodeid, if_name) ); CREATE TABLE pglogical.local_node ( node_id oid PRIMARY KEY REFERENCES node(node_id), node_local_interface oid NOT NULL REFERENCES node_interface(if_id) ); CREATE TABLE pglogical.subscription ( sub_id oid NOT NULL PRIMARY KEY, sub_name name NOT NULL UNIQUE, sub_origin oid NOT NULL REFERENCES node(node_id), sub_target oid NOT NULL REFERENCES node(node_id), sub_origin_if oid NOT NULL REFERENCES node_interface(if_id), sub_target_if oid NOT NULL REFERENCES node_interface(if_id), sub_enabled boolean NOT NULL DEFAULT true, sub_slot_name name NOT NULL, sub_replication_sets text[], sub_forward_origins text[], sub_apply_delay interval NOT NULL DEFAULT '0' ); CREATE TABLE pglogical.local_sync_status ( sync_kind "char" NOT NULL CHECK (sync_kind IN ('i', 's', 'd', 'f')), sync_subid oid NOT NULL REFERENCES pglogical.subscription(sub_id), sync_nspname name, sync_relname name, sync_status "char" NOT NULL, sync_statuslsn pg_lsn NOT NULL, UNIQUE (sync_subid, sync_nspname, sync_relname) ); CREATE FUNCTION pglogical.create_node(node_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_node'; CREATE 
FUNCTION pglogical.drop_node(node_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_node'; CREATE FUNCTION pglogical.alter_node_add_interface(node_name name, interface_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_add_interface'; CREATE FUNCTION pglogical.alter_node_drop_interface(node_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_drop_interface'; CREATE FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[] = '{default,default_insert_only,ddl_sql}', synchronize_structure boolean = false, synchronize_data boolean = true, forward_origins text[] = '{all}', apply_delay interval DEFAULT '0') RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_subscription'; CREATE FUNCTION pglogical.drop_subscription(subscription_name name, ifexists boolean DEFAULT false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_subscription'; CREATE FUNCTION pglogical.alter_subscription_interface(subscription_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_interface'; CREATE FUNCTION pglogical.alter_subscription_disable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_disable'; CREATE FUNCTION pglogical.alter_subscription_enable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_enable'; CREATE FUNCTION pglogical.alter_subscription_add_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_add_replication_set'; 
CREATE FUNCTION pglogical.alter_subscription_remove_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_remove_replication_set'; CREATE FUNCTION pglogical.show_subscription_status(subscription_name name DEFAULT NULL, OUT subscription_name text, OUT status text, OUT provider_node text, OUT provider_dsn text, OUT slot_name text, OUT replication_sets text[], OUT forward_origins text[]) RETURNS SETOF record STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_status'; CREATE TABLE pglogical.replication_set ( set_id oid NOT NULL PRIMARY KEY, set_nodeid oid NOT NULL, set_name name NOT NULL, replicate_insert boolean NOT NULL DEFAULT true, replicate_update boolean NOT NULL DEFAULT true, replicate_delete boolean NOT NULL DEFAULT true, replicate_truncate boolean NOT NULL DEFAULT true, UNIQUE (set_nodeid, set_name) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_table ( set_id oid NOT NULL, set_reloid regclass NOT NULL, set_att_list text[], set_row_filter pg_node_tree, PRIMARY KEY(set_id, set_reloid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_seq ( set_id oid NOT NULL, set_seqoid regclass NOT NULL, PRIMARY KEY(set_id, set_seqoid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.sequence_state ( seqoid oid NOT NULL PRIMARY KEY, cache_size integer NOT NULL, last_value bigint NOT NULL ) WITH (user_catalog_table=true); CREATE TABLE pglogical.depend ( classid oid NOT NULL, objid oid NOT NULL, objsubid integer NOT NULL, refclassid oid NOT NULL, refobjid oid NOT NULL, refobjsubid integer NOT NULL, deptype "char" NOT NULL ) WITH (user_catalog_table=true); CREATE VIEW pglogical.TABLES AS WITH set_relations AS ( SELECT s.set_name, r.set_reloid FROM pglogical.replication_set_table r, pglogical.replication_set s, pglogical.local_node n WHERE s.set_nodeid = n.node_id AND s.set_id = r.set_id ), user_tables AS 
( SELECT r.oid, n.nspname, r.relname, r.relreplident FROM pg_catalog.pg_class r, pg_catalog.pg_namespace n WHERE r.relkind = 'r' AND r.relpersistence = 'p' AND n.oid = r.relnamespace AND n.nspname !~ '^pg_' AND n.nspname != 'information_schema' AND n.nspname != 'pglogical' ) SELECT r.oid AS relid, n.nspname, r.relname, s.set_name FROM pg_catalog.pg_namespace n, pg_catalog.pg_class r, set_relations s WHERE r.relkind = 'r' AND n.oid = r.relnamespace AND r.oid = s.set_reloid UNION SELECT t.oid AS relid, t.nspname, t.relname, NULL FROM user_tables t WHERE t.oid NOT IN (SELECT set_reloid FROM set_relations); CREATE FUNCTION pglogical.create_replication_set(set_name name, replicate_insert boolean = true, replicate_update boolean = true, replicate_delete boolean = true, replicate_truncate boolean = true) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_replication_set'; CREATE FUNCTION pglogical.alter_replication_set(set_name name, replicate_insert boolean DEFAULT NULL, replicate_update boolean DEFAULT NULL, replicate_delete boolean DEFAULT NULL, replicate_truncate boolean DEFAULT NULL) RETURNS oid CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_replication_set'; CREATE FUNCTION pglogical.drop_replication_set(set_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_replication_set'; CREATE FUNCTION pglogical.replication_set_add_table(set_name name, relation regclass, synchronize_data boolean DEFAULT false, columns text[] DEFAULT NULL, row_filter text DEFAULT NULL) RETURNS boolean CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_table'; CREATE FUNCTION pglogical.replication_set_add_all_tables(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_tables'; CREATE FUNCTION 
pglogical.replication_set_remove_table(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_table'; CREATE FUNCTION pglogical.replication_set_add_sequence(set_name name, relation regclass, synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_sequence'; CREATE FUNCTION pglogical.replication_set_add_all_sequences(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_sequences'; CREATE FUNCTION pglogical.replication_set_remove_sequence(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_sequence'; CREATE FUNCTION pglogical.alter_subscription_synchronize(subscription_name name, truncate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_synchronize'; CREATE FUNCTION pglogical.alter_subscription_resynchronize_table(subscription_name name, relation regclass, truncate boolean DEFAULT true) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_resynchronize_table'; CREATE FUNCTION pglogical.synchronize_sequence(relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_synchronize_sequence'; CREATE FUNCTION pglogical.table_data_filtered(reltyp anyelement, relation regclass, repsets text[]) RETURNS SETOF anyelement CALLED ON NULL INPUT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_table_data_filtered'; CREATE FUNCTION pglogical.show_repset_table_info(relation regclass, repsets text[], OUT relid oid, OUT nspname text, OUT relname text, OUT att_list text[], OUT has_row_filter boolean) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 
'pglogical_show_repset_table_info'; CREATE FUNCTION pglogical.show_subscription_table(subscription_name name, relation regclass, OUT nspname text, OUT relname text, OUT status text) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_table'; CREATE TABLE pglogical.queue ( queued_at timestamp with time zone NOT NULL, role name NOT NULL, replication_sets text[], message_type "char" NOT NULL, message json NOT NULL ); CREATE FUNCTION pglogical.replicate_ddl_command(command text, replication_sets text[] DEFAULT '{ddl_sql}') RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replicate_ddl_command'; CREATE OR REPLACE FUNCTION pglogical.queue_truncate() RETURNS trigger LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_queue_truncate'; CREATE FUNCTION pglogical.pglogical_node_info(OUT node_id oid, OUT node_name text, OUT sysid text, OUT dbname text, OUT replication_sets text) RETURNS record STABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.pglogical_gen_slot_name(name, name, name) RETURNS name IMMUTABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version() RETURNS text LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version_num() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_max_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_min_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.wait_slot_confirm_lsn(slotname name, target pg_lsn) RETURNS void LANGUAGE c AS 'pglogical','pglogical_wait_slot_confirm_lsn'; pglogical-REL2_4_1/pglogical--2.2.1--2.2.2.sql000066400000000000000000000000001415142317000200610ustar00rootroot00000000000000pglogical-REL2_4_1/pglogical--2.2.1.sql000066400000000000000000000301501415142317000173560ustar00rootroot00000000000000\echo Use "CREATE EXTENSION pglogical" to load this file. 
\quit CREATE TABLE pglogical.node ( node_id oid NOT NULL PRIMARY KEY, node_name name NOT NULL UNIQUE ) WITH (user_catalog_table=true); CREATE TABLE pglogical.node_interface ( if_id oid NOT NULL PRIMARY KEY, if_name name NOT NULL, -- default same as node name if_nodeid oid REFERENCES node(node_id), if_dsn text NOT NULL, UNIQUE (if_nodeid, if_name) ); CREATE TABLE pglogical.local_node ( node_id oid PRIMARY KEY REFERENCES node(node_id), node_local_interface oid NOT NULL REFERENCES node_interface(if_id) ); CREATE TABLE pglogical.subscription ( sub_id oid NOT NULL PRIMARY KEY, sub_name name NOT NULL UNIQUE, sub_origin oid NOT NULL REFERENCES node(node_id), sub_target oid NOT NULL REFERENCES node(node_id), sub_origin_if oid NOT NULL REFERENCES node_interface(if_id), sub_target_if oid NOT NULL REFERENCES node_interface(if_id), sub_enabled boolean NOT NULL DEFAULT true, sub_slot_name name NOT NULL, sub_replication_sets text[], sub_forward_origins text[], sub_apply_delay interval NOT NULL DEFAULT '0' ); CREATE TABLE pglogical.local_sync_status ( sync_kind "char" NOT NULL CHECK (sync_kind IN ('i', 's', 'd', 'f')), sync_subid oid NOT NULL REFERENCES pglogical.subscription(sub_id), sync_nspname name, sync_relname name, sync_status "char" NOT NULL, sync_statuslsn pg_lsn NOT NULL, UNIQUE (sync_subid, sync_nspname, sync_relname) ); CREATE FUNCTION pglogical.create_node(node_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_node'; CREATE FUNCTION pglogical.drop_node(node_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_node'; CREATE FUNCTION pglogical.alter_node_add_interface(node_name name, interface_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_add_interface'; CREATE FUNCTION pglogical.alter_node_drop_interface(node_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 
'MODULE_PATHNAME', 'pglogical_alter_node_drop_interface'; CREATE FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[] = '{default,default_insert_only,ddl_sql}', synchronize_structure boolean = false, synchronize_data boolean = true, forward_origins text[] = '{all}', apply_delay interval DEFAULT '0') RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_subscription'; CREATE FUNCTION pglogical.drop_subscription(subscription_name name, ifexists boolean DEFAULT false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_subscription'; CREATE FUNCTION pglogical.alter_subscription_interface(subscription_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_interface'; CREATE FUNCTION pglogical.alter_subscription_disable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_disable'; CREATE FUNCTION pglogical.alter_subscription_enable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_enable'; CREATE FUNCTION pglogical.alter_subscription_add_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_add_replication_set'; CREATE FUNCTION pglogical.alter_subscription_remove_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_remove_replication_set'; CREATE FUNCTION pglogical.show_subscription_status(subscription_name name DEFAULT NULL, OUT subscription_name text, OUT status text, OUT provider_node text, OUT provider_dsn text, OUT slot_name text, OUT replication_sets text[], OUT forward_origins text[]) 
RETURNS SETOF record STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_status'; CREATE TABLE pglogical.replication_set ( set_id oid NOT NULL PRIMARY KEY, set_nodeid oid NOT NULL, set_name name NOT NULL, replicate_insert boolean NOT NULL DEFAULT true, replicate_update boolean NOT NULL DEFAULT true, replicate_delete boolean NOT NULL DEFAULT true, replicate_truncate boolean NOT NULL DEFAULT true, UNIQUE (set_nodeid, set_name) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_table ( set_id oid NOT NULL, set_reloid regclass NOT NULL, set_att_list text[], set_row_filter pg_node_tree, PRIMARY KEY(set_id, set_reloid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_seq ( set_id oid NOT NULL, set_seqoid regclass NOT NULL, PRIMARY KEY(set_id, set_seqoid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.sequence_state ( seqoid oid NOT NULL PRIMARY KEY, cache_size integer NOT NULL, last_value bigint NOT NULL ) WITH (user_catalog_table=true); CREATE TABLE pglogical.depend ( classid oid NOT NULL, objid oid NOT NULL, objsubid integer NOT NULL, refclassid oid NOT NULL, refobjid oid NOT NULL, refobjsubid integer NOT NULL, deptype "char" NOT NULL ) WITH (user_catalog_table=true); CREATE VIEW pglogical.TABLES AS WITH set_relations AS ( SELECT s.set_name, r.set_reloid FROM pglogical.replication_set_table r, pglogical.replication_set s, pglogical.local_node n WHERE s.set_nodeid = n.node_id AND s.set_id = r.set_id ), user_tables AS ( SELECT r.oid, n.nspname, r.relname, r.relreplident FROM pg_catalog.pg_class r, pg_catalog.pg_namespace n WHERE r.relkind = 'r' AND r.relpersistence = 'p' AND n.oid = r.relnamespace AND n.nspname !~ '^pg_' AND n.nspname != 'information_schema' AND n.nspname != 'pglogical' ) SELECT r.oid AS relid, n.nspname, r.relname, s.set_name FROM pg_catalog.pg_namespace n, pg_catalog.pg_class r, set_relations s WHERE r.relkind = 'r' AND n.oid = r.relnamespace AND r.oid = s.set_reloid UNION SELECT 
t.oid AS relid, t.nspname, t.relname, NULL FROM user_tables t WHERE t.oid NOT IN (SELECT set_reloid FROM set_relations); CREATE FUNCTION pglogical.create_replication_set(set_name name, replicate_insert boolean = true, replicate_update boolean = true, replicate_delete boolean = true, replicate_truncate boolean = true) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_replication_set'; CREATE FUNCTION pglogical.alter_replication_set(set_name name, replicate_insert boolean DEFAULT NULL, replicate_update boolean DEFAULT NULL, replicate_delete boolean DEFAULT NULL, replicate_truncate boolean DEFAULT NULL) RETURNS oid CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_replication_set'; CREATE FUNCTION pglogical.drop_replication_set(set_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_replication_set'; CREATE FUNCTION pglogical.replication_set_add_table(set_name name, relation regclass, synchronize_data boolean DEFAULT false, columns text[] DEFAULT NULL, row_filter text DEFAULT NULL) RETURNS boolean CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_table'; CREATE FUNCTION pglogical.replication_set_add_all_tables(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_tables'; CREATE FUNCTION pglogical.replication_set_remove_table(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_table'; CREATE FUNCTION pglogical.replication_set_add_sequence(set_name name, relation regclass, synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_sequence'; CREATE FUNCTION pglogical.replication_set_add_all_sequences(set_name name, 
schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_sequences'; CREATE FUNCTION pglogical.replication_set_remove_sequence(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_sequence'; CREATE FUNCTION pglogical.alter_subscription_synchronize(subscription_name name, truncate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_synchronize'; CREATE FUNCTION pglogical.alter_subscription_resynchronize_table(subscription_name name, relation regclass, truncate boolean DEFAULT true) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_resynchronize_table'; CREATE FUNCTION pglogical.synchronize_sequence(relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_synchronize_sequence'; CREATE FUNCTION pglogical.table_data_filtered(reltyp anyelement, relation regclass, repsets text[]) RETURNS SETOF anyelement CALLED ON NULL INPUT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_table_data_filtered'; CREATE FUNCTION pglogical.show_repset_table_info(relation regclass, repsets text[], OUT relid oid, OUT nspname text, OUT relname text, OUT att_list text[], OUT has_row_filter boolean) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_repset_table_info'; CREATE FUNCTION pglogical.show_subscription_table(subscription_name name, relation regclass, OUT nspname text, OUT relname text, OUT status text) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_table'; CREATE TABLE pglogical.queue ( queued_at timestamp with time zone NOT NULL, role name NOT NULL, replication_sets text[], message_type "char" NOT NULL, message json NOT NULL ); CREATE FUNCTION pglogical.replicate_ddl_command(command 
text, replication_sets text[] DEFAULT '{ddl_sql}') RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replicate_ddl_command'; CREATE OR REPLACE FUNCTION pglogical.queue_truncate() RETURNS trigger LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_queue_truncate'; CREATE FUNCTION pglogical.pglogical_node_info(OUT node_id oid, OUT node_name text, OUT sysid text, OUT dbname text, OUT replication_sets text) RETURNS record STABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.pglogical_gen_slot_name(name, name, name) RETURNS name IMMUTABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version() RETURNS text LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version_num() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_max_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_min_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.wait_slot_confirm_lsn(slotname name, target pg_lsn) RETURNS void LANGUAGE c AS 'pglogical','pglogical_wait_slot_confirm_lsn'; CREATE FUNCTION pglogical.wait_for_subscription_sync_complete(subscription_name name) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_subscription_sync_complete'; CREATE FUNCTION pglogical.wait_for_table_sync_complete(subscription_name name, relation regclass) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_table_sync_complete'; CREATE FUNCTION pglogical.xact_commit_timestamp_origin("xid" xid, OUT "timestamp" timestamptz, OUT "roident" oid) RETURNS record RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_xact_commit_timestamp_origin'; pglogical-REL2_4_1/pglogical--2.2.2--2.3.0.sql000066400000000000000000000057631415142317000201060ustar00rootroot00000000000000ALTER TABLE pglogical.subscription ADD COLUMN 
sub_force_text_transfer boolean NOT NULL DEFAULT 'f'; CREATE FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[] = '{default,default_insert_only,ddl_sql}', synchronize_structure text = 'none', synchronize_data boolean = true, forward_origins text[] = '{all}', apply_delay interval DEFAULT '0', force_text_transfer boolean = false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_subscription'; DROP FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[], synchronize_structure boolean, synchronize_data boolean, forward_origins text[], apply_delay interval); ALTER TABLE pglogical.replication_set_table ADD COLUMN set_nsptarget name NOT NULL , ADD COLUMN set_reltarget name NOT NULL; ALTER TABLE pglogical.replication_set_seq ADD COLUMN set_nsptarget name NOT NULL , ADD COLUMN set_seqtarget name NOT NULL; DROP FUNCTION pglogical.show_repset_table_info(regclass, text[]); CREATE FUNCTION pglogical.show_repset_table_info(relation regclass, repsets text[], OUT relid oid, OUT nspname text, OUT relname text, OUT att_list text[], OUT has_row_filter boolean, OUT nsptarget text, OUT reltarget text) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_repset_table_info'; CREATE FUNCTION pglogical.show_repset_table_info_by_target(nsptarget name, reltarget name, repsets text[], OUT relid oid, OUT nspname text, OUT relname text, OUT att_list text[], OUT has_row_filter boolean, OUT nsptarget text, OUT reltarget text) RETURNS SETOF record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_repset_table_info_by_target'; UPDATE pglogical.replication_set_table SET set_nsptarget = n.nspname , set_reltarget = c.relname FROM pg_class c JOIN pg_namespace n ON n.oid = c.relnamespace WHERE c.oid = set_reloid; UPDATE pglogical.replication_set_seq SET set_nsptarget = n.nspname , set_seqtarget = c.relname FROM pg_class c JOIN 
pg_namespace n ON n.oid = c.relnamespace WHERE c.oid = set_seqoid; -- a VACUUM FULL of the table above would be nice here. DROP FUNCTION pglogical.replication_set_add_table(name, regclass, boolean, text[], text); CREATE FUNCTION pglogical.replication_set_add_table(set_name name, relation regclass, synchronize_data boolean DEFAULT false, columns text[] DEFAULT NULL, row_filter text DEFAULT NULL, nsptarget name DEFAULT NULL, reltarget name DEFAULT NULL) RETURNS boolean CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_table'; DROP FUNCTION pglogical.replication_set_add_sequence(name, regclass, boolean); CREATE FUNCTION pglogical.replication_set_add_sequence(set_name name, relation regclass, synchronize_data boolean DEFAULT false, nsptarget name DEFAULT NULL, reltarget name DEFAULT NULL) RETURNS boolean VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_sequence'; pglogical-REL2_4_1/pglogical--2.2.2--2.3.1.sql000066400000000000000000000001441415142317000200730ustar00rootroot00000000000000ALTER TABLE pglogical.subscription ADD COLUMN sub_force_text_transfer boolean NOT NULL DEFAULT 'f'; pglogical-REL2_4_1/pglogical--2.2.2.sql000066400000000000000000000301501415142317000173570ustar00rootroot00000000000000\echo Use "CREATE EXTENSION pglogical" to load this file. 
\quit CREATE TABLE pglogical.node ( node_id oid NOT NULL PRIMARY KEY, node_name name NOT NULL UNIQUE ) WITH (user_catalog_table=true); CREATE TABLE pglogical.node_interface ( if_id oid NOT NULL PRIMARY KEY, if_name name NOT NULL, -- default same as node name if_nodeid oid REFERENCES node(node_id), if_dsn text NOT NULL, UNIQUE (if_nodeid, if_name) ); CREATE TABLE pglogical.local_node ( node_id oid PRIMARY KEY REFERENCES node(node_id), node_local_interface oid NOT NULL REFERENCES node_interface(if_id) ); CREATE TABLE pglogical.subscription ( sub_id oid NOT NULL PRIMARY KEY, sub_name name NOT NULL UNIQUE, sub_origin oid NOT NULL REFERENCES node(node_id), sub_target oid NOT NULL REFERENCES node(node_id), sub_origin_if oid NOT NULL REFERENCES node_interface(if_id), sub_target_if oid NOT NULL REFERENCES node_interface(if_id), sub_enabled boolean NOT NULL DEFAULT true, sub_slot_name name NOT NULL, sub_replication_sets text[], sub_forward_origins text[], sub_apply_delay interval NOT NULL DEFAULT '0' ); CREATE TABLE pglogical.local_sync_status ( sync_kind "char" NOT NULL CHECK (sync_kind IN ('i', 's', 'd', 'f')), sync_subid oid NOT NULL REFERENCES pglogical.subscription(sub_id), sync_nspname name, sync_relname name, sync_status "char" NOT NULL, sync_statuslsn pg_lsn NOT NULL, UNIQUE (sync_subid, sync_nspname, sync_relname) ); CREATE FUNCTION pglogical.create_node(node_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_node'; CREATE FUNCTION pglogical.drop_node(node_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_node'; CREATE FUNCTION pglogical.alter_node_add_interface(node_name name, interface_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_add_interface'; CREATE FUNCTION pglogical.alter_node_drop_interface(node_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 
'MODULE_PATHNAME', 'pglogical_alter_node_drop_interface'; CREATE FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[] = '{default,default_insert_only,ddl_sql}', synchronize_structure boolean = false, synchronize_data boolean = true, forward_origins text[] = '{all}', apply_delay interval DEFAULT '0') RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_subscription'; CREATE FUNCTION pglogical.drop_subscription(subscription_name name, ifexists boolean DEFAULT false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_subscription'; CREATE FUNCTION pglogical.alter_subscription_interface(subscription_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_interface'; CREATE FUNCTION pglogical.alter_subscription_disable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_disable'; CREATE FUNCTION pglogical.alter_subscription_enable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_enable'; CREATE FUNCTION pglogical.alter_subscription_add_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_add_replication_set'; CREATE FUNCTION pglogical.alter_subscription_remove_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_remove_replication_set'; CREATE FUNCTION pglogical.show_subscription_status(subscription_name name DEFAULT NULL, OUT subscription_name text, OUT status text, OUT provider_node text, OUT provider_dsn text, OUT slot_name text, OUT replication_sets text[], OUT forward_origins text[]) 
RETURNS SETOF record STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_status'; CREATE TABLE pglogical.replication_set ( set_id oid NOT NULL PRIMARY KEY, set_nodeid oid NOT NULL, set_name name NOT NULL, replicate_insert boolean NOT NULL DEFAULT true, replicate_update boolean NOT NULL DEFAULT true, replicate_delete boolean NOT NULL DEFAULT true, replicate_truncate boolean NOT NULL DEFAULT true, UNIQUE (set_nodeid, set_name) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_table ( set_id oid NOT NULL, set_reloid regclass NOT NULL, set_att_list text[], set_row_filter pg_node_tree, PRIMARY KEY(set_id, set_reloid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_seq ( set_id oid NOT NULL, set_seqoid regclass NOT NULL, PRIMARY KEY(set_id, set_seqoid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.sequence_state ( seqoid oid NOT NULL PRIMARY KEY, cache_size integer NOT NULL, last_value bigint NOT NULL ) WITH (user_catalog_table=true); CREATE TABLE pglogical.depend ( classid oid NOT NULL, objid oid NOT NULL, objsubid integer NOT NULL, refclassid oid NOT NULL, refobjid oid NOT NULL, refobjsubid integer NOT NULL, deptype "char" NOT NULL ) WITH (user_catalog_table=true); CREATE VIEW pglogical.TABLES AS WITH set_relations AS ( SELECT s.set_name, r.set_reloid FROM pglogical.replication_set_table r, pglogical.replication_set s, pglogical.local_node n WHERE s.set_nodeid = n.node_id AND s.set_id = r.set_id ), user_tables AS ( SELECT r.oid, n.nspname, r.relname, r.relreplident FROM pg_catalog.pg_class r, pg_catalog.pg_namespace n WHERE r.relkind = 'r' AND r.relpersistence = 'p' AND n.oid = r.relnamespace AND n.nspname !~ '^pg_' AND n.nspname != 'information_schema' AND n.nspname != 'pglogical' ) SELECT r.oid AS relid, n.nspname, r.relname, s.set_name FROM pg_catalog.pg_namespace n, pg_catalog.pg_class r, set_relations s WHERE r.relkind = 'r' AND n.oid = r.relnamespace AND r.oid = s.set_reloid UNION SELECT 
t.oid AS relid, t.nspname, t.relname, NULL FROM user_tables t WHERE t.oid NOT IN (SELECT set_reloid FROM set_relations); CREATE FUNCTION pglogical.create_replication_set(set_name name, replicate_insert boolean = true, replicate_update boolean = true, replicate_delete boolean = true, replicate_truncate boolean = true) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_replication_set'; CREATE FUNCTION pglogical.alter_replication_set(set_name name, replicate_insert boolean DEFAULT NULL, replicate_update boolean DEFAULT NULL, replicate_delete boolean DEFAULT NULL, replicate_truncate boolean DEFAULT NULL) RETURNS oid CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_replication_set'; CREATE FUNCTION pglogical.drop_replication_set(set_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_replication_set'; CREATE FUNCTION pglogical.replication_set_add_table(set_name name, relation regclass, synchronize_data boolean DEFAULT false, columns text[] DEFAULT NULL, row_filter text DEFAULT NULL) RETURNS boolean CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_table'; CREATE FUNCTION pglogical.replication_set_add_all_tables(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_tables'; CREATE FUNCTION pglogical.replication_set_remove_table(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_table'; CREATE FUNCTION pglogical.replication_set_add_sequence(set_name name, relation regclass, synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_sequence'; CREATE FUNCTION pglogical.replication_set_add_all_sequences(set_name name, 
schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_sequences'; CREATE FUNCTION pglogical.replication_set_remove_sequence(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_sequence'; CREATE FUNCTION pglogical.alter_subscription_synchronize(subscription_name name, truncate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_synchronize'; CREATE FUNCTION pglogical.alter_subscription_resynchronize_table(subscription_name name, relation regclass, truncate boolean DEFAULT true) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_resynchronize_table'; CREATE FUNCTION pglogical.synchronize_sequence(relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_synchronize_sequence'; CREATE FUNCTION pglogical.table_data_filtered(reltyp anyelement, relation regclass, repsets text[]) RETURNS SETOF anyelement CALLED ON NULL INPUT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_table_data_filtered'; CREATE FUNCTION pglogical.show_repset_table_info(relation regclass, repsets text[], OUT relid oid, OUT nspname text, OUT relname text, OUT att_list text[], OUT has_row_filter boolean) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_repset_table_info'; CREATE FUNCTION pglogical.show_subscription_table(subscription_name name, relation regclass, OUT nspname text, OUT relname text, OUT status text) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_table'; CREATE TABLE pglogical.queue ( queued_at timestamp with time zone NOT NULL, role name NOT NULL, replication_sets text[], message_type "char" NOT NULL, message json NOT NULL ); CREATE FUNCTION pglogical.replicate_ddl_command(command 
text, replication_sets text[] DEFAULT '{ddl_sql}') RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replicate_ddl_command'; CREATE OR REPLACE FUNCTION pglogical.queue_truncate() RETURNS trigger LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_queue_truncate'; CREATE FUNCTION pglogical.pglogical_node_info(OUT node_id oid, OUT node_name text, OUT sysid text, OUT dbname text, OUT replication_sets text) RETURNS record STABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.pglogical_gen_slot_name(name, name, name) RETURNS name IMMUTABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version() RETURNS text LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version_num() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_max_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_min_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.wait_slot_confirm_lsn(slotname name, target pg_lsn) RETURNS void LANGUAGE c AS 'pglogical','pglogical_wait_slot_confirm_lsn'; CREATE FUNCTION pglogical.wait_for_subscription_sync_complete(subscription_name name) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_subscription_sync_complete'; CREATE FUNCTION pglogical.wait_for_table_sync_complete(subscription_name name, relation regclass) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_table_sync_complete'; CREATE FUNCTION pglogical.xact_commit_timestamp_origin("xid" xid, OUT "timestamp" timestamptz, OUT "roident" oid) RETURNS record RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_xact_commit_timestamp_origin'; pglogical-REL2_4_1/pglogical--2.3.0--2.3.1.sql000066400000000000000000000075771415142317000201130ustar00rootroot00000000000000DROP FUNCTION pglogical.create_subscription(subscription_name 
name, provider_dsn text, replication_sets text[], synchronize_structure text, synchronize_data boolean, forward_origins text[], apply_delay interval, force_text_transfer boolean); CREATE FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[] = '{default,default_insert_only,ddl_sql}', synchronize_structure boolean = false, synchronize_data boolean = true, forward_origins text[] = '{all}', apply_delay interval DEFAULT '0', force_text_transfer boolean = false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_subscription'; DROP FUNCTION pglogical.show_repset_table_info(regclass, text[]); CREATE FUNCTION pglogical.show_repset_table_info(relation regclass, repsets text[], OUT relid oid, OUT nspname text, OUT relname text, OUT att_list text[], OUT has_row_filter boolean) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_repset_table_info'; DROP FUNCTION pglogical.show_repset_table_info_by_target(name, name, text[]); DROP FUNCTION pglogical.replication_set_add_table(name, regclass, boolean, text[], text, name, name); CREATE FUNCTION pglogical.replication_set_add_table(set_name name, relation regclass, synchronize_data boolean DEFAULT false, columns text[] DEFAULT NULL, row_filter text DEFAULT NULL) RETURNS boolean CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_table'; DROP FUNCTION pglogical.replication_set_add_sequence(name, regclass, boolean, name, name); CREATE FUNCTION pglogical.replication_set_add_sequence(set_name name, relation regclass, synchronize_data boolean DEFAULT false) RETURNS boolean VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_sequence'; ALTER TABLE pglogical.replication_set_table RENAME TO replication_set_table_old; CREATE TABLE pglogical.replication_set_table ( set_id oid NOT NULL, set_reloid regclass NOT NULL, set_att_list text[], set_row_filter pg_node_tree, PRIMARY 
KEY(set_id, set_reloid) ) WITH (user_catalog_table=true); INSERT INTO pglogical.replication_set_table SELECT set_id, set_reloid, set_att_list, set_row_filter FROM pglogical.replication_set_table_old; DROP VIEW pglogical.tables; DROP TABLE pglogical.replication_set_table_old; ALTER TABLE pglogical.replication_set_seq RENAME TO replication_set_seq_old; CREATE TABLE pglogical.replication_set_seq ( set_id oid NOT NULL, set_seqoid regclass NOT NULL, PRIMARY KEY(set_id, set_seqoid) ) WITH (user_catalog_table=true); INSERT INTO pglogical.replication_set_seq SELECT set_id, set_seqoid FROM pglogical.replication_set_seq_old; DROP TABLE pglogical.replication_set_seq_old; -- must recreate on top of new replication_set_table CREATE VIEW pglogical.TABLES AS WITH set_relations AS ( SELECT s.set_name, r.set_reloid FROM pglogical.replication_set_table r, pglogical.replication_set s, pglogical.local_node n WHERE s.set_nodeid = n.node_id AND s.set_id = r.set_id ), user_tables AS ( SELECT r.oid, n.nspname, r.relname, r.relreplident FROM pg_catalog.pg_class r, pg_catalog.pg_namespace n WHERE r.relkind = 'r' AND r.relpersistence = 'p' AND n.oid = r.relnamespace AND n.nspname !~ '^pg_' AND n.nspname != 'information_schema' AND n.nspname != 'pglogical' ) SELECT r.oid AS relid, n.nspname, r.relname, s.set_name FROM pg_catalog.pg_namespace n, pg_catalog.pg_class r, set_relations s WHERE r.relkind = 'r' AND n.oid = r.relnamespace AND r.oid = s.set_reloid UNION SELECT t.oid AS relid, t.nspname, t.relname, NULL FROM user_tables t WHERE t.oid NOT IN (SELECT set_reloid FROM set_relations); pglogical-REL2_4_1/pglogical--2.3.0.sql000066400000000000000000000315271415142317000173670ustar00rootroot00000000000000\echo Use "CREATE EXTENSION pglogical" to load this file. 
\quit CREATE TABLE pglogical.node ( node_id oid NOT NULL PRIMARY KEY, node_name name NOT NULL UNIQUE ) WITH (user_catalog_table=true); CREATE TABLE pglogical.node_interface ( if_id oid NOT NULL PRIMARY KEY, if_name name NOT NULL, -- default same as node name if_nodeid oid REFERENCES node(node_id), if_dsn text NOT NULL, UNIQUE (if_nodeid, if_name) ); CREATE TABLE pglogical.local_node ( node_id oid PRIMARY KEY REFERENCES node(node_id), node_local_interface oid NOT NULL REFERENCES node_interface(if_id) ); CREATE TABLE pglogical.subscription ( sub_id oid NOT NULL PRIMARY KEY, sub_name name NOT NULL UNIQUE, sub_origin oid NOT NULL REFERENCES node(node_id), sub_target oid NOT NULL REFERENCES node(node_id), sub_origin_if oid NOT NULL REFERENCES node_interface(if_id), sub_target_if oid NOT NULL REFERENCES node_interface(if_id), sub_enabled boolean NOT NULL DEFAULT true, sub_slot_name name NOT NULL, sub_replication_sets text[], sub_forward_origins text[], sub_apply_delay interval NOT NULL DEFAULT '0', sub_force_text_transfer boolean NOT NULL DEFAULT 'f' ); CREATE TABLE pglogical.local_sync_status ( sync_kind "char" NOT NULL CHECK (sync_kind IN ('i', 's', 'd', 'f')), sync_subid oid NOT NULL REFERENCES pglogical.subscription(sub_id), sync_nspname name, sync_relname name, sync_status "char" NOT NULL, sync_statuslsn pg_lsn NOT NULL, UNIQUE (sync_subid, sync_nspname, sync_relname) ); CREATE FUNCTION pglogical.create_node(node_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_node'; CREATE FUNCTION pglogical.drop_node(node_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_node'; CREATE FUNCTION pglogical.alter_node_add_interface(node_name name, interface_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_add_interface'; CREATE FUNCTION pglogical.alter_node_drop_interface(node_name name, interface_name 
name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_drop_interface'; CREATE FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[] = '{default,default_insert_only,ddl_sql}', synchronize_structure text = 'none', synchronize_data boolean = true, forward_origins text[] = '{all}', apply_delay interval DEFAULT '0', force_text_transfer boolean = false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_subscription'; CREATE FUNCTION pglogical.drop_subscription(subscription_name name, ifexists boolean DEFAULT false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_subscription'; CREATE FUNCTION pglogical.alter_subscription_interface(subscription_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_interface'; CREATE FUNCTION pglogical.alter_subscription_disable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_disable'; CREATE FUNCTION pglogical.alter_subscription_enable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_enable'; CREATE FUNCTION pglogical.alter_subscription_add_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_add_replication_set'; CREATE FUNCTION pglogical.alter_subscription_remove_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_remove_replication_set'; CREATE FUNCTION pglogical.show_subscription_status(subscription_name name DEFAULT NULL, OUT subscription_name text, OUT status text, OUT provider_node text, OUT provider_dsn 
text, OUT slot_name text, OUT replication_sets text[], OUT forward_origins text[]) RETURNS SETOF record STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_status'; CREATE TABLE pglogical.replication_set ( set_id oid NOT NULL PRIMARY KEY, set_nodeid oid NOT NULL, set_name name NOT NULL, replicate_insert boolean NOT NULL DEFAULT true, replicate_update boolean NOT NULL DEFAULT true, replicate_delete boolean NOT NULL DEFAULT true, replicate_truncate boolean NOT NULL DEFAULT true, UNIQUE (set_nodeid, set_name) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_table ( set_id oid NOT NULL, set_reloid regclass NOT NULL, set_att_list text[], set_row_filter pg_node_tree, set_nsptarget name NOT NULL, set_reltarget name NOT NULL, PRIMARY KEY(set_id, set_reloid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_seq ( set_id oid NOT NULL, set_seqoid regclass NOT NULL, set_nsptarget name NOT NULL, set_seqtarget name NOT NULL, PRIMARY KEY(set_id, set_seqoid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.sequence_state ( seqoid oid NOT NULL PRIMARY KEY, cache_size integer NOT NULL, last_value bigint NOT NULL ) WITH (user_catalog_table=true); CREATE TABLE pglogical.depend ( classid oid NOT NULL, objid oid NOT NULL, objsubid integer NOT NULL, refclassid oid NOT NULL, refobjid oid NOT NULL, refobjsubid integer NOT NULL, deptype "char" NOT NULL ) WITH (user_catalog_table=true); CREATE VIEW pglogical.TABLES AS WITH set_relations AS ( SELECT s.set_name, r.set_reloid FROM pglogical.replication_set_table r, pglogical.replication_set s, pglogical.local_node n WHERE s.set_nodeid = n.node_id AND s.set_id = r.set_id ), user_tables AS ( SELECT r.oid, n.nspname, r.relname, r.relreplident FROM pg_catalog.pg_class r, pg_catalog.pg_namespace n WHERE r.relkind = 'r' AND r.relpersistence = 'p' AND n.oid = r.relnamespace AND n.nspname !~ '^pg_' AND n.nspname != 'information_schema' AND n.nspname != 'pglogical' ) SELECT r.oid AS 
relid, n.nspname, r.relname, s.set_name FROM pg_catalog.pg_namespace n, pg_catalog.pg_class r, set_relations s WHERE r.relkind = 'r' AND n.oid = r.relnamespace AND r.oid = s.set_reloid UNION SELECT t.oid AS relid, t.nspname, t.relname, NULL FROM user_tables t WHERE t.oid NOT IN (SELECT set_reloid FROM set_relations); CREATE FUNCTION pglogical.create_replication_set(set_name name, replicate_insert boolean = true, replicate_update boolean = true, replicate_delete boolean = true, replicate_truncate boolean = true) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_replication_set'; CREATE FUNCTION pglogical.alter_replication_set(set_name name, replicate_insert boolean DEFAULT NULL, replicate_update boolean DEFAULT NULL, replicate_delete boolean DEFAULT NULL, replicate_truncate boolean DEFAULT NULL) RETURNS oid CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_replication_set'; CREATE FUNCTION pglogical.drop_replication_set(set_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_replication_set'; CREATE FUNCTION pglogical.replication_set_add_table(set_name name, relation regclass, synchronize_data boolean DEFAULT false, columns text[] DEFAULT NULL, row_filter text DEFAULT NULL, nsptarget name DEFAULT NULL, reltarget name DEFAULT NULL) RETURNS boolean CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_table'; CREATE FUNCTION pglogical.replication_set_add_all_tables(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_tables'; CREATE FUNCTION pglogical.replication_set_remove_table(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_table'; CREATE FUNCTION 
pglogical.replication_set_add_sequence(set_name name, relation regclass, synchronize_data boolean DEFAULT false, nsptarget name DEFAULT NULL, reltarget name DEFAULT NULL) RETURNS boolean VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_sequence'; CREATE FUNCTION pglogical.replication_set_add_all_sequences(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_sequences'; CREATE FUNCTION pglogical.replication_set_remove_sequence(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_sequence'; CREATE FUNCTION pglogical.alter_subscription_synchronize(subscription_name name, truncate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_synchronize'; CREATE FUNCTION pglogical.alter_subscription_resynchronize_table(subscription_name name, relation regclass, truncate boolean DEFAULT true) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_resynchronize_table'; CREATE FUNCTION pglogical.synchronize_sequence(relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_synchronize_sequence'; CREATE FUNCTION pglogical.table_data_filtered(reltyp anyelement, relation regclass, repsets text[]) RETURNS SETOF anyelement CALLED ON NULL INPUT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_table_data_filtered'; CREATE FUNCTION pglogical.show_repset_table_info(relation regclass, repsets text[], OUT relid oid, OUT nspname text, OUT relname text, OUT att_list text[], OUT has_row_filter boolean, OUT nsptarget text, OUT reltarget text) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_repset_table_info'; CREATE FUNCTION pglogical.show_repset_table_info_by_target(nsptarget name, reltarget name, 
repsets text[], OUT relid oid, OUT nspname text, OUT relname text, OUT att_list text[], OUT has_row_filter boolean, OUT nsptarget text, OUT reltarget text) RETURNS SETOF record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_repset_table_info_by_target'; CREATE FUNCTION pglogical.show_subscription_table(subscription_name name, relation regclass, OUT nspname text, OUT relname text, OUT status text) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_table'; CREATE TABLE pglogical.queue ( queued_at timestamp with time zone NOT NULL, role name NOT NULL, replication_sets text[], message_type "char" NOT NULL, message json NOT NULL ); CREATE FUNCTION pglogical.replicate_ddl_command(command text, replication_sets text[] DEFAULT '{ddl_sql}') RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replicate_ddl_command'; CREATE OR REPLACE FUNCTION pglogical.queue_truncate() RETURNS trigger LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_queue_truncate'; CREATE FUNCTION pglogical.pglogical_node_info(OUT node_id oid, OUT node_name text, OUT sysid text, OUT dbname text, OUT replication_sets text) RETURNS record STABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.pglogical_gen_slot_name(name, name, name) RETURNS name IMMUTABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version() RETURNS text LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version_num() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_max_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_min_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.wait_slot_confirm_lsn(slotname name, target pg_lsn) RETURNS void LANGUAGE c AS 'pglogical','pglogical_wait_slot_confirm_lsn'; CREATE FUNCTION pglogical.wait_for_subscription_sync_complete(subscription_name name) RETURNS void RETURNS NULL ON NULL 
INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_subscription_sync_complete'; CREATE FUNCTION pglogical.wait_for_table_sync_complete(subscription_name name, relation regclass) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_table_sync_complete'; CREATE FUNCTION pglogical.xact_commit_timestamp_origin("xid" xid, OUT "timestamp" timestamptz, OUT "roident" oid) RETURNS record RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_xact_commit_timestamp_origin'; pglogical-REL2_4_1/pglogical--2.3.1--2.3.2.sql000066400000000000000000000000001415142317000200630ustar00rootroot00000000000000pglogical-REL2_4_1/pglogical--2.3.1.sql000066400000000000000000000303131415142317000173600ustar00rootroot00000000000000\echo Use "CREATE EXTENSION pglogical" to load this file. \quit CREATE TABLE pglogical.node ( node_id oid NOT NULL PRIMARY KEY, node_name name NOT NULL UNIQUE ) WITH (user_catalog_table=true); CREATE TABLE pglogical.node_interface ( if_id oid NOT NULL PRIMARY KEY, if_name name NOT NULL, -- default same as node name if_nodeid oid REFERENCES node(node_id), if_dsn text NOT NULL, UNIQUE (if_nodeid, if_name) ); CREATE TABLE pglogical.local_node ( node_id oid PRIMARY KEY REFERENCES node(node_id), node_local_interface oid NOT NULL REFERENCES node_interface(if_id) ); CREATE TABLE pglogical.subscription ( sub_id oid NOT NULL PRIMARY KEY, sub_name name NOT NULL UNIQUE, sub_origin oid NOT NULL REFERENCES node(node_id), sub_target oid NOT NULL REFERENCES node(node_id), sub_origin_if oid NOT NULL REFERENCES node_interface(if_id), sub_target_if oid NOT NULL REFERENCES node_interface(if_id), sub_enabled boolean NOT NULL DEFAULT true, sub_slot_name name NOT NULL, sub_replication_sets text[], sub_forward_origins text[], sub_apply_delay interval NOT NULL DEFAULT '0', sub_force_text_transfer boolean NOT NULL DEFAULT 'f' ); CREATE TABLE pglogical.local_sync_status ( sync_kind "char" NOT NULL 
CHECK (sync_kind IN ('i', 's', 'd', 'f')), sync_subid oid NOT NULL REFERENCES pglogical.subscription(sub_id), sync_nspname name, sync_relname name, sync_status "char" NOT NULL, sync_statuslsn pg_lsn NOT NULL, UNIQUE (sync_subid, sync_nspname, sync_relname) ); CREATE FUNCTION pglogical.create_node(node_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_node'; CREATE FUNCTION pglogical.drop_node(node_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_node'; CREATE FUNCTION pglogical.alter_node_add_interface(node_name name, interface_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_add_interface'; CREATE FUNCTION pglogical.alter_node_drop_interface(node_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_drop_interface'; CREATE FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[] = '{default,default_insert_only,ddl_sql}', synchronize_structure boolean = false, synchronize_data boolean = true, forward_origins text[] = '{all}', apply_delay interval DEFAULT '0', force_text_transfer boolean = false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_subscription'; CREATE FUNCTION pglogical.drop_subscription(subscription_name name, ifexists boolean DEFAULT false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_subscription'; CREATE FUNCTION pglogical.alter_subscription_interface(subscription_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_interface'; CREATE FUNCTION pglogical.alter_subscription_disable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 
'pglogical_alter_subscription_disable'; CREATE FUNCTION pglogical.alter_subscription_enable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_enable'; CREATE FUNCTION pglogical.alter_subscription_add_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_add_replication_set'; CREATE FUNCTION pglogical.alter_subscription_remove_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_remove_replication_set'; CREATE FUNCTION pglogical.show_subscription_status(subscription_name name DEFAULT NULL, OUT subscription_name text, OUT status text, OUT provider_node text, OUT provider_dsn text, OUT slot_name text, OUT replication_sets text[], OUT forward_origins text[]) RETURNS SETOF record STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_status'; CREATE TABLE pglogical.replication_set ( set_id oid NOT NULL PRIMARY KEY, set_nodeid oid NOT NULL, set_name name NOT NULL, replicate_insert boolean NOT NULL DEFAULT true, replicate_update boolean NOT NULL DEFAULT true, replicate_delete boolean NOT NULL DEFAULT true, replicate_truncate boolean NOT NULL DEFAULT true, UNIQUE (set_nodeid, set_name) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_table ( set_id oid NOT NULL, set_reloid regclass NOT NULL, set_att_list text[], set_row_filter pg_node_tree, PRIMARY KEY(set_id, set_reloid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_seq ( set_id oid NOT NULL, set_seqoid regclass NOT NULL, PRIMARY KEY(set_id, set_seqoid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.sequence_state ( seqoid oid NOT NULL PRIMARY KEY, cache_size integer NOT NULL, last_value bigint NOT NULL ) WITH (user_catalog_table=true); CREATE 
TABLE pglogical.depend ( classid oid NOT NULL, objid oid NOT NULL, objsubid integer NOT NULL, refclassid oid NOT NULL, refobjid oid NOT NULL, refobjsubid integer NOT NULL, deptype "char" NOT NULL ) WITH (user_catalog_table=true); CREATE VIEW pglogical.TABLES AS WITH set_relations AS ( SELECT s.set_name, r.set_reloid FROM pglogical.replication_set_table r, pglogical.replication_set s, pglogical.local_node n WHERE s.set_nodeid = n.node_id AND s.set_id = r.set_id ), user_tables AS ( SELECT r.oid, n.nspname, r.relname, r.relreplident FROM pg_catalog.pg_class r, pg_catalog.pg_namespace n WHERE r.relkind = 'r' AND r.relpersistence = 'p' AND n.oid = r.relnamespace AND n.nspname !~ '^pg_' AND n.nspname != 'information_schema' AND n.nspname != 'pglogical' ) SELECT r.oid AS relid, n.nspname, r.relname, s.set_name FROM pg_catalog.pg_namespace n, pg_catalog.pg_class r, set_relations s WHERE r.relkind = 'r' AND n.oid = r.relnamespace AND r.oid = s.set_reloid UNION SELECT t.oid AS relid, t.nspname, t.relname, NULL FROM user_tables t WHERE t.oid NOT IN (SELECT set_reloid FROM set_relations); CREATE FUNCTION pglogical.create_replication_set(set_name name, replicate_insert boolean = true, replicate_update boolean = true, replicate_delete boolean = true, replicate_truncate boolean = true) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_replication_set'; CREATE FUNCTION pglogical.alter_replication_set(set_name name, replicate_insert boolean DEFAULT NULL, replicate_update boolean DEFAULT NULL, replicate_delete boolean DEFAULT NULL, replicate_truncate boolean DEFAULT NULL) RETURNS oid CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_replication_set'; CREATE FUNCTION pglogical.drop_replication_set(set_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_replication_set'; CREATE FUNCTION pglogical.replication_set_add_table(set_name name, relation regclass, 
synchronize_data boolean DEFAULT false, columns text[] DEFAULT NULL, row_filter text DEFAULT NULL) RETURNS boolean CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_table'; CREATE FUNCTION pglogical.replication_set_add_all_tables(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_tables'; CREATE FUNCTION pglogical.replication_set_remove_table(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_table'; CREATE FUNCTION pglogical.replication_set_add_sequence(set_name name, relation regclass, synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_sequence'; CREATE FUNCTION pglogical.replication_set_add_all_sequences(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_sequences'; CREATE FUNCTION pglogical.replication_set_remove_sequence(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_sequence'; CREATE FUNCTION pglogical.alter_subscription_synchronize(subscription_name name, truncate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_synchronize'; CREATE FUNCTION pglogical.alter_subscription_resynchronize_table(subscription_name name, relation regclass, truncate boolean DEFAULT true) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_resynchronize_table'; CREATE FUNCTION pglogical.synchronize_sequence(relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_synchronize_sequence'; CREATE FUNCTION 
pglogical.table_data_filtered(reltyp anyelement, relation regclass, repsets text[]) RETURNS SETOF anyelement CALLED ON NULL INPUT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_table_data_filtered'; CREATE FUNCTION pglogical.show_repset_table_info(relation regclass, repsets text[], OUT relid oid, OUT nspname text, OUT relname text, OUT att_list text[], OUT has_row_filter boolean) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_repset_table_info'; CREATE FUNCTION pglogical.show_subscription_table(subscription_name name, relation regclass, OUT nspname text, OUT relname text, OUT status text) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_table'; CREATE TABLE pglogical.queue ( queued_at timestamp with time zone NOT NULL, role name NOT NULL, replication_sets text[], message_type "char" NOT NULL, message json NOT NULL ); CREATE FUNCTION pglogical.replicate_ddl_command(command text, replication_sets text[] DEFAULT '{ddl_sql}') RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replicate_ddl_command'; CREATE OR REPLACE FUNCTION pglogical.queue_truncate() RETURNS trigger LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_queue_truncate'; CREATE FUNCTION pglogical.pglogical_node_info(OUT node_id oid, OUT node_name text, OUT sysid text, OUT dbname text, OUT replication_sets text) RETURNS record STABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.pglogical_gen_slot_name(name, name, name) RETURNS name IMMUTABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version() RETURNS text LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version_num() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_max_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_min_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.wait_slot_confirm_lsn(slotname 
name, target pg_lsn) RETURNS void LANGUAGE c AS 'pglogical','pglogical_wait_slot_confirm_lsn'; CREATE FUNCTION pglogical.wait_for_subscription_sync_complete(subscription_name name) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_subscription_sync_complete'; CREATE FUNCTION pglogical.wait_for_table_sync_complete(subscription_name name, relation regclass) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_table_sync_complete'; CREATE FUNCTION pglogical.xact_commit_timestamp_origin("xid" xid, OUT "timestamp" timestamptz, OUT "roident" oid) RETURNS record RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_xact_commit_timestamp_origin'; pglogical-REL2_4_1/pglogical--2.3.2--2.3.3.sql000066400000000000000000000000001415142317000200650ustar00rootroot00000000000000pglogical-REL2_4_1/pglogical--2.3.2.sql000066400000000000000000000303131415142317000173610ustar00rootroot00000000000000\echo Use "CREATE EXTENSION pglogical" to load this file. 
\quit CREATE TABLE pglogical.node ( node_id oid NOT NULL PRIMARY KEY, node_name name NOT NULL UNIQUE ) WITH (user_catalog_table=true); CREATE TABLE pglogical.node_interface ( if_id oid NOT NULL PRIMARY KEY, if_name name NOT NULL, -- default same as node name if_nodeid oid REFERENCES node(node_id), if_dsn text NOT NULL, UNIQUE (if_nodeid, if_name) ); CREATE TABLE pglogical.local_node ( node_id oid PRIMARY KEY REFERENCES node(node_id), node_local_interface oid NOT NULL REFERENCES node_interface(if_id) ); CREATE TABLE pglogical.subscription ( sub_id oid NOT NULL PRIMARY KEY, sub_name name NOT NULL UNIQUE, sub_origin oid NOT NULL REFERENCES node(node_id), sub_target oid NOT NULL REFERENCES node(node_id), sub_origin_if oid NOT NULL REFERENCES node_interface(if_id), sub_target_if oid NOT NULL REFERENCES node_interface(if_id), sub_enabled boolean NOT NULL DEFAULT true, sub_slot_name name NOT NULL, sub_replication_sets text[], sub_forward_origins text[], sub_apply_delay interval NOT NULL DEFAULT '0', sub_force_text_transfer boolean NOT NULL DEFAULT 'f' ); CREATE TABLE pglogical.local_sync_status ( sync_kind "char" NOT NULL CHECK (sync_kind IN ('i', 's', 'd', 'f')), sync_subid oid NOT NULL REFERENCES pglogical.subscription(sub_id), sync_nspname name, sync_relname name, sync_status "char" NOT NULL, sync_statuslsn pg_lsn NOT NULL, UNIQUE (sync_subid, sync_nspname, sync_relname) ); CREATE FUNCTION pglogical.create_node(node_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_node'; CREATE FUNCTION pglogical.drop_node(node_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_node'; CREATE FUNCTION pglogical.alter_node_add_interface(node_name name, interface_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_add_interface'; CREATE FUNCTION pglogical.alter_node_drop_interface(node_name name, interface_name 
name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_drop_interface'; CREATE FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[] = '{default,default_insert_only,ddl_sql}', synchronize_structure boolean = false, synchronize_data boolean = true, forward_origins text[] = '{all}', apply_delay interval DEFAULT '0', force_text_transfer boolean = false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_subscription'; CREATE FUNCTION pglogical.drop_subscription(subscription_name name, ifexists boolean DEFAULT false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_subscription'; CREATE FUNCTION pglogical.alter_subscription_interface(subscription_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_interface'; CREATE FUNCTION pglogical.alter_subscription_disable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_disable'; CREATE FUNCTION pglogical.alter_subscription_enable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_enable'; CREATE FUNCTION pglogical.alter_subscription_add_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_add_replication_set'; CREATE FUNCTION pglogical.alter_subscription_remove_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_remove_replication_set'; CREATE FUNCTION pglogical.show_subscription_status(subscription_name name DEFAULT NULL, OUT subscription_name text, OUT status text, OUT provider_node text, OUT provider_dsn 
text, OUT slot_name text, OUT replication_sets text[], OUT forward_origins text[]) RETURNS SETOF record STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_status'; CREATE TABLE pglogical.replication_set ( set_id oid NOT NULL PRIMARY KEY, set_nodeid oid NOT NULL, set_name name NOT NULL, replicate_insert boolean NOT NULL DEFAULT true, replicate_update boolean NOT NULL DEFAULT true, replicate_delete boolean NOT NULL DEFAULT true, replicate_truncate boolean NOT NULL DEFAULT true, UNIQUE (set_nodeid, set_name) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_table ( set_id oid NOT NULL, set_reloid regclass NOT NULL, set_att_list text[], set_row_filter pg_node_tree, PRIMARY KEY(set_id, set_reloid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_seq ( set_id oid NOT NULL, set_seqoid regclass NOT NULL, PRIMARY KEY(set_id, set_seqoid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.sequence_state ( seqoid oid NOT NULL PRIMARY KEY, cache_size integer NOT NULL, last_value bigint NOT NULL ) WITH (user_catalog_table=true); CREATE TABLE pglogical.depend ( classid oid NOT NULL, objid oid NOT NULL, objsubid integer NOT NULL, refclassid oid NOT NULL, refobjid oid NOT NULL, refobjsubid integer NOT NULL, deptype "char" NOT NULL ) WITH (user_catalog_table=true); CREATE VIEW pglogical.TABLES AS WITH set_relations AS ( SELECT s.set_name, r.set_reloid FROM pglogical.replication_set_table r, pglogical.replication_set s, pglogical.local_node n WHERE s.set_nodeid = n.node_id AND s.set_id = r.set_id ), user_tables AS ( SELECT r.oid, n.nspname, r.relname, r.relreplident FROM pg_catalog.pg_class r, pg_catalog.pg_namespace n WHERE r.relkind = 'r' AND r.relpersistence = 'p' AND n.oid = r.relnamespace AND n.nspname !~ '^pg_' AND n.nspname != 'information_schema' AND n.nspname != 'pglogical' ) SELECT r.oid AS relid, n.nspname, r.relname, s.set_name FROM pg_catalog.pg_namespace n, pg_catalog.pg_class r, set_relations s 
WHERE r.relkind = 'r' AND n.oid = r.relnamespace AND r.oid = s.set_reloid UNION SELECT t.oid AS relid, t.nspname, t.relname, NULL FROM user_tables t WHERE t.oid NOT IN (SELECT set_reloid FROM set_relations); CREATE FUNCTION pglogical.create_replication_set(set_name name, replicate_insert boolean = true, replicate_update boolean = true, replicate_delete boolean = true, replicate_truncate boolean = true) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_replication_set'; CREATE FUNCTION pglogical.alter_replication_set(set_name name, replicate_insert boolean DEFAULT NULL, replicate_update boolean DEFAULT NULL, replicate_delete boolean DEFAULT NULL, replicate_truncate boolean DEFAULT NULL) RETURNS oid CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_replication_set'; CREATE FUNCTION pglogical.drop_replication_set(set_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_replication_set'; CREATE FUNCTION pglogical.replication_set_add_table(set_name name, relation regclass, synchronize_data boolean DEFAULT false, columns text[] DEFAULT NULL, row_filter text DEFAULT NULL) RETURNS boolean CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_table'; CREATE FUNCTION pglogical.replication_set_add_all_tables(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_tables'; CREATE FUNCTION pglogical.replication_set_remove_table(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_table'; CREATE FUNCTION pglogical.replication_set_add_sequence(set_name name, relation regclass, synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 
'pglogical_replication_set_add_sequence'; CREATE FUNCTION pglogical.replication_set_add_all_sequences(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_sequences'; CREATE FUNCTION pglogical.replication_set_remove_sequence(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_sequence'; CREATE FUNCTION pglogical.alter_subscription_synchronize(subscription_name name, truncate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_synchronize'; CREATE FUNCTION pglogical.alter_subscription_resynchronize_table(subscription_name name, relation regclass, truncate boolean DEFAULT true) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_resynchronize_table'; CREATE FUNCTION pglogical.synchronize_sequence(relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_synchronize_sequence'; CREATE FUNCTION pglogical.table_data_filtered(reltyp anyelement, relation regclass, repsets text[]) RETURNS SETOF anyelement CALLED ON NULL INPUT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_table_data_filtered'; CREATE FUNCTION pglogical.show_repset_table_info(relation regclass, repsets text[], OUT relid oid, OUT nspname text, OUT relname text, OUT att_list text[], OUT has_row_filter boolean) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_repset_table_info'; CREATE FUNCTION pglogical.show_subscription_table(subscription_name name, relation regclass, OUT nspname text, OUT relname text, OUT status text) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_table'; CREATE TABLE pglogical.queue ( queued_at timestamp with time zone NOT NULL, role name NOT NULL, replication_sets 
text[], message_type "char" NOT NULL, message json NOT NULL ); CREATE FUNCTION pglogical.replicate_ddl_command(command text, replication_sets text[] DEFAULT '{ddl_sql}') RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replicate_ddl_command'; CREATE OR REPLACE FUNCTION pglogical.queue_truncate() RETURNS trigger LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_queue_truncate'; CREATE FUNCTION pglogical.pglogical_node_info(OUT node_id oid, OUT node_name text, OUT sysid text, OUT dbname text, OUT replication_sets text) RETURNS record STABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.pglogical_gen_slot_name(name, name, name) RETURNS name IMMUTABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version() RETURNS text LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version_num() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_max_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_min_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.wait_slot_confirm_lsn(slotname name, target pg_lsn) RETURNS void LANGUAGE c AS 'pglogical','pglogical_wait_slot_confirm_lsn'; CREATE FUNCTION pglogical.wait_for_subscription_sync_complete(subscription_name name) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_subscription_sync_complete'; CREATE FUNCTION pglogical.wait_for_table_sync_complete(subscription_name name, relation regclass) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_table_sync_complete'; CREATE FUNCTION pglogical.xact_commit_timestamp_origin("xid" xid, OUT "timestamp" timestamptz, OUT "roident" oid) RETURNS record RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_xact_commit_timestamp_origin'; 
pglogical-REL2_4_1/pglogical--2.3.3--2.3.4.sql000066400000000000000000000000001415142317000200670ustar00rootroot00000000000000pglogical-REL2_4_1/pglogical--2.3.3.sql000066400000000000000000000303131415142317000173620ustar00rootroot00000000000000\echo Use "CREATE EXTENSION pglogical" to load this file. \quit CREATE TABLE pglogical.node ( node_id oid NOT NULL PRIMARY KEY, node_name name NOT NULL UNIQUE ) WITH (user_catalog_table=true); CREATE TABLE pglogical.node_interface ( if_id oid NOT NULL PRIMARY KEY, if_name name NOT NULL, -- default same as node name if_nodeid oid REFERENCES node(node_id), if_dsn text NOT NULL, UNIQUE (if_nodeid, if_name) ); CREATE TABLE pglogical.local_node ( node_id oid PRIMARY KEY REFERENCES node(node_id), node_local_interface oid NOT NULL REFERENCES node_interface(if_id) ); CREATE TABLE pglogical.subscription ( sub_id oid NOT NULL PRIMARY KEY, sub_name name NOT NULL UNIQUE, sub_origin oid NOT NULL REFERENCES node(node_id), sub_target oid NOT NULL REFERENCES node(node_id), sub_origin_if oid NOT NULL REFERENCES node_interface(if_id), sub_target_if oid NOT NULL REFERENCES node_interface(if_id), sub_enabled boolean NOT NULL DEFAULT true, sub_slot_name name NOT NULL, sub_replication_sets text[], sub_forward_origins text[], sub_apply_delay interval NOT NULL DEFAULT '0', sub_force_text_transfer boolean NOT NULL DEFAULT 'f' ); CREATE TABLE pglogical.local_sync_status ( sync_kind "char" NOT NULL CHECK (sync_kind IN ('i', 's', 'd', 'f')), sync_subid oid NOT NULL REFERENCES pglogical.subscription(sub_id), sync_nspname name, sync_relname name, sync_status "char" NOT NULL, sync_statuslsn pg_lsn NOT NULL, UNIQUE (sync_subid, sync_nspname, sync_relname) ); CREATE FUNCTION pglogical.create_node(node_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_node'; CREATE FUNCTION pglogical.drop_node(node_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 
'pglogical_drop_node'; CREATE FUNCTION pglogical.alter_node_add_interface(node_name name, interface_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_add_interface'; CREATE FUNCTION pglogical.alter_node_drop_interface(node_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_drop_interface'; CREATE FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[] = '{default,default_insert_only,ddl_sql}', synchronize_structure boolean = false, synchronize_data boolean = true, forward_origins text[] = '{all}', apply_delay interval DEFAULT '0', force_text_transfer boolean = false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_subscription'; CREATE FUNCTION pglogical.drop_subscription(subscription_name name, ifexists boolean DEFAULT false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_subscription'; CREATE FUNCTION pglogical.alter_subscription_interface(subscription_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_interface'; CREATE FUNCTION pglogical.alter_subscription_disable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_disable'; CREATE FUNCTION pglogical.alter_subscription_enable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_enable'; CREATE FUNCTION pglogical.alter_subscription_add_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_add_replication_set'; CREATE FUNCTION pglogical.alter_subscription_remove_replication_set(subscription_name name, 
replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_remove_replication_set'; CREATE FUNCTION pglogical.show_subscription_status(subscription_name name DEFAULT NULL, OUT subscription_name text, OUT status text, OUT provider_node text, OUT provider_dsn text, OUT slot_name text, OUT replication_sets text[], OUT forward_origins text[]) RETURNS SETOF record STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_status'; CREATE TABLE pglogical.replication_set ( set_id oid NOT NULL PRIMARY KEY, set_nodeid oid NOT NULL, set_name name NOT NULL, replicate_insert boolean NOT NULL DEFAULT true, replicate_update boolean NOT NULL DEFAULT true, replicate_delete boolean NOT NULL DEFAULT true, replicate_truncate boolean NOT NULL DEFAULT true, UNIQUE (set_nodeid, set_name) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_table ( set_id oid NOT NULL, set_reloid regclass NOT NULL, set_att_list text[], set_row_filter pg_node_tree, PRIMARY KEY(set_id, set_reloid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_seq ( set_id oid NOT NULL, set_seqoid regclass NOT NULL, PRIMARY KEY(set_id, set_seqoid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.sequence_state ( seqoid oid NOT NULL PRIMARY KEY, cache_size integer NOT NULL, last_value bigint NOT NULL ) WITH (user_catalog_table=true); CREATE TABLE pglogical.depend ( classid oid NOT NULL, objid oid NOT NULL, objsubid integer NOT NULL, refclassid oid NOT NULL, refobjid oid NOT NULL, refobjsubid integer NOT NULL, deptype "char" NOT NULL ) WITH (user_catalog_table=true); CREATE VIEW pglogical.TABLES AS WITH set_relations AS ( SELECT s.set_name, r.set_reloid FROM pglogical.replication_set_table r, pglogical.replication_set s, pglogical.local_node n WHERE s.set_nodeid = n.node_id AND s.set_id = r.set_id ), user_tables AS ( SELECT r.oid, n.nspname, r.relname, r.relreplident FROM pg_catalog.pg_class r, 
pg_catalog.pg_namespace n WHERE r.relkind = 'r' AND r.relpersistence = 'p' AND n.oid = r.relnamespace AND n.nspname !~ '^pg_' AND n.nspname != 'information_schema' AND n.nspname != 'pglogical' ) SELECT r.oid AS relid, n.nspname, r.relname, s.set_name FROM pg_catalog.pg_namespace n, pg_catalog.pg_class r, set_relations s WHERE r.relkind = 'r' AND n.oid = r.relnamespace AND r.oid = s.set_reloid UNION SELECT t.oid AS relid, t.nspname, t.relname, NULL FROM user_tables t WHERE t.oid NOT IN (SELECT set_reloid FROM set_relations); CREATE FUNCTION pglogical.create_replication_set(set_name name, replicate_insert boolean = true, replicate_update boolean = true, replicate_delete boolean = true, replicate_truncate boolean = true) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_replication_set'; CREATE FUNCTION pglogical.alter_replication_set(set_name name, replicate_insert boolean DEFAULT NULL, replicate_update boolean DEFAULT NULL, replicate_delete boolean DEFAULT NULL, replicate_truncate boolean DEFAULT NULL) RETURNS oid CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_replication_set'; CREATE FUNCTION pglogical.drop_replication_set(set_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_replication_set'; CREATE FUNCTION pglogical.replication_set_add_table(set_name name, relation regclass, synchronize_data boolean DEFAULT false, columns text[] DEFAULT NULL, row_filter text DEFAULT NULL) RETURNS boolean CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_table'; CREATE FUNCTION pglogical.replication_set_add_all_tables(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_tables'; CREATE FUNCTION pglogical.replication_set_remove_table(set_name name, relation regclass) RETURNS boolean 
STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_table'; CREATE FUNCTION pglogical.replication_set_add_sequence(set_name name, relation regclass, synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_sequence'; CREATE FUNCTION pglogical.replication_set_add_all_sequences(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_sequences'; CREATE FUNCTION pglogical.replication_set_remove_sequence(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_sequence'; CREATE FUNCTION pglogical.alter_subscription_synchronize(subscription_name name, truncate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_synchronize'; CREATE FUNCTION pglogical.alter_subscription_resynchronize_table(subscription_name name, relation regclass, truncate boolean DEFAULT true) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_resynchronize_table'; CREATE FUNCTION pglogical.synchronize_sequence(relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_synchronize_sequence'; CREATE FUNCTION pglogical.table_data_filtered(reltyp anyelement, relation regclass, repsets text[]) RETURNS SETOF anyelement CALLED ON NULL INPUT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_table_data_filtered'; CREATE FUNCTION pglogical.show_repset_table_info(relation regclass, repsets text[], OUT relid oid, OUT nspname text, OUT relname text, OUT att_list text[], OUT has_row_filter boolean) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_repset_table_info'; CREATE FUNCTION pglogical.show_subscription_table(subscription_name name, 
relation regclass, OUT nspname text, OUT relname text, OUT status text) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_table'; CREATE TABLE pglogical.queue ( queued_at timestamp with time zone NOT NULL, role name NOT NULL, replication_sets text[], message_type "char" NOT NULL, message json NOT NULL ); CREATE FUNCTION pglogical.replicate_ddl_command(command text, replication_sets text[] DEFAULT '{ddl_sql}') RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replicate_ddl_command'; CREATE OR REPLACE FUNCTION pglogical.queue_truncate() RETURNS trigger LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_queue_truncate'; CREATE FUNCTION pglogical.pglogical_node_info(OUT node_id oid, OUT node_name text, OUT sysid text, OUT dbname text, OUT replication_sets text) RETURNS record STABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.pglogical_gen_slot_name(name, name, name) RETURNS name IMMUTABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version() RETURNS text LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version_num() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_max_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_min_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.wait_slot_confirm_lsn(slotname name, target pg_lsn) RETURNS void LANGUAGE c AS 'pglogical','pglogical_wait_slot_confirm_lsn'; CREATE FUNCTION pglogical.wait_for_subscription_sync_complete(subscription_name name) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_subscription_sync_complete'; CREATE FUNCTION pglogical.wait_for_table_sync_complete(subscription_name name, relation regclass) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_table_sync_complete'; CREATE FUNCTION 
pglogical.xact_commit_timestamp_origin("xid" xid, OUT "timestamp" timestamptz, OUT "roident" oid) RETURNS record RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_xact_commit_timestamp_origin'; pglogical-REL2_4_1/pglogical--2.3.4--2.4.0.sql000066400000000000000000000000001415142317000200650ustar00rootroot00000000000000pglogical-REL2_4_1/pglogical--2.3.4.sql000066400000000000000000000303131415142317000173630ustar00rootroot00000000000000\echo Use "CREATE EXTENSION pglogical" to load this file. \quit CREATE TABLE pglogical.node ( node_id oid NOT NULL PRIMARY KEY, node_name name NOT NULL UNIQUE ) WITH (user_catalog_table=true); CREATE TABLE pglogical.node_interface ( if_id oid NOT NULL PRIMARY KEY, if_name name NOT NULL, -- default same as node name if_nodeid oid REFERENCES node(node_id), if_dsn text NOT NULL, UNIQUE (if_nodeid, if_name) ); CREATE TABLE pglogical.local_node ( node_id oid PRIMARY KEY REFERENCES node(node_id), node_local_interface oid NOT NULL REFERENCES node_interface(if_id) ); CREATE TABLE pglogical.subscription ( sub_id oid NOT NULL PRIMARY KEY, sub_name name NOT NULL UNIQUE, sub_origin oid NOT NULL REFERENCES node(node_id), sub_target oid NOT NULL REFERENCES node(node_id), sub_origin_if oid NOT NULL REFERENCES node_interface(if_id), sub_target_if oid NOT NULL REFERENCES node_interface(if_id), sub_enabled boolean NOT NULL DEFAULT true, sub_slot_name name NOT NULL, sub_replication_sets text[], sub_forward_origins text[], sub_apply_delay interval NOT NULL DEFAULT '0', sub_force_text_transfer boolean NOT NULL DEFAULT 'f' ); CREATE TABLE pglogical.local_sync_status ( sync_kind "char" NOT NULL CHECK (sync_kind IN ('i', 's', 'd', 'f')), sync_subid oid NOT NULL REFERENCES pglogical.subscription(sub_id), sync_nspname name, sync_relname name, sync_status "char" NOT NULL, sync_statuslsn pg_lsn NOT NULL, UNIQUE (sync_subid, sync_nspname, sync_relname) ); CREATE FUNCTION pglogical.create_node(node_name name, dsn text) RETURNS oid 
STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_node'; CREATE FUNCTION pglogical.drop_node(node_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_node'; CREATE FUNCTION pglogical.alter_node_add_interface(node_name name, interface_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_add_interface'; CREATE FUNCTION pglogical.alter_node_drop_interface(node_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_drop_interface'; CREATE FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[] = '{default,default_insert_only,ddl_sql}', synchronize_structure boolean = false, synchronize_data boolean = true, forward_origins text[] = '{all}', apply_delay interval DEFAULT '0', force_text_transfer boolean = false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_subscription'; CREATE FUNCTION pglogical.drop_subscription(subscription_name name, ifexists boolean DEFAULT false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_subscription'; CREATE FUNCTION pglogical.alter_subscription_interface(subscription_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_interface'; CREATE FUNCTION pglogical.alter_subscription_disable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_disable'; CREATE FUNCTION pglogical.alter_subscription_enable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_enable'; CREATE FUNCTION pglogical.alter_subscription_add_replication_set(subscription_name name, replication_set 
name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_add_replication_set'; CREATE FUNCTION pglogical.alter_subscription_remove_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_remove_replication_set'; CREATE FUNCTION pglogical.show_subscription_status(subscription_name name DEFAULT NULL, OUT subscription_name text, OUT status text, OUT provider_node text, OUT provider_dsn text, OUT slot_name text, OUT replication_sets text[], OUT forward_origins text[]) RETURNS SETOF record STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_status'; CREATE TABLE pglogical.replication_set ( set_id oid NOT NULL PRIMARY KEY, set_nodeid oid NOT NULL, set_name name NOT NULL, replicate_insert boolean NOT NULL DEFAULT true, replicate_update boolean NOT NULL DEFAULT true, replicate_delete boolean NOT NULL DEFAULT true, replicate_truncate boolean NOT NULL DEFAULT true, UNIQUE (set_nodeid, set_name) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_table ( set_id oid NOT NULL, set_reloid regclass NOT NULL, set_att_list text[], set_row_filter pg_node_tree, PRIMARY KEY(set_id, set_reloid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_seq ( set_id oid NOT NULL, set_seqoid regclass NOT NULL, PRIMARY KEY(set_id, set_seqoid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.sequence_state ( seqoid oid NOT NULL PRIMARY KEY, cache_size integer NOT NULL, last_value bigint NOT NULL ) WITH (user_catalog_table=true); CREATE TABLE pglogical.depend ( classid oid NOT NULL, objid oid NOT NULL, objsubid integer NOT NULL, refclassid oid NOT NULL, refobjid oid NOT NULL, refobjsubid integer NOT NULL, deptype "char" NOT NULL ) WITH (user_catalog_table=true); CREATE VIEW pglogical.TABLES AS WITH set_relations AS ( SELECT s.set_name, r.set_reloid FROM pglogical.replication_set_table r, 
pglogical.replication_set s, pglogical.local_node n WHERE s.set_nodeid = n.node_id AND s.set_id = r.set_id ), user_tables AS ( SELECT r.oid, n.nspname, r.relname, r.relreplident FROM pg_catalog.pg_class r, pg_catalog.pg_namespace n WHERE r.relkind = 'r' AND r.relpersistence = 'p' AND n.oid = r.relnamespace AND n.nspname !~ '^pg_' AND n.nspname != 'information_schema' AND n.nspname != 'pglogical' ) SELECT r.oid AS relid, n.nspname, r.relname, s.set_name FROM pg_catalog.pg_namespace n, pg_catalog.pg_class r, set_relations s WHERE r.relkind = 'r' AND n.oid = r.relnamespace AND r.oid = s.set_reloid UNION SELECT t.oid AS relid, t.nspname, t.relname, NULL FROM user_tables t WHERE t.oid NOT IN (SELECT set_reloid FROM set_relations); CREATE FUNCTION pglogical.create_replication_set(set_name name, replicate_insert boolean = true, replicate_update boolean = true, replicate_delete boolean = true, replicate_truncate boolean = true) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_replication_set'; CREATE FUNCTION pglogical.alter_replication_set(set_name name, replicate_insert boolean DEFAULT NULL, replicate_update boolean DEFAULT NULL, replicate_delete boolean DEFAULT NULL, replicate_truncate boolean DEFAULT NULL) RETURNS oid CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_replication_set'; CREATE FUNCTION pglogical.drop_replication_set(set_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_replication_set'; CREATE FUNCTION pglogical.replication_set_add_table(set_name name, relation regclass, synchronize_data boolean DEFAULT false, columns text[] DEFAULT NULL, row_filter text DEFAULT NULL) RETURNS boolean CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_table'; CREATE FUNCTION pglogical.replication_set_add_all_tables(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS 
boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_tables'; CREATE FUNCTION pglogical.replication_set_remove_table(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_table'; CREATE FUNCTION pglogical.replication_set_add_sequence(set_name name, relation regclass, synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_sequence'; CREATE FUNCTION pglogical.replication_set_add_all_sequences(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_sequences'; CREATE FUNCTION pglogical.replication_set_remove_sequence(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_sequence'; CREATE FUNCTION pglogical.alter_subscription_synchronize(subscription_name name, truncate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_synchronize'; CREATE FUNCTION pglogical.alter_subscription_resynchronize_table(subscription_name name, relation regclass, truncate boolean DEFAULT true) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_resynchronize_table'; CREATE FUNCTION pglogical.synchronize_sequence(relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_synchronize_sequence'; CREATE FUNCTION pglogical.table_data_filtered(reltyp anyelement, relation regclass, repsets text[]) RETURNS SETOF anyelement CALLED ON NULL INPUT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_table_data_filtered'; CREATE FUNCTION pglogical.show_repset_table_info(relation regclass, repsets text[], OUT relid oid, OUT nspname text, OUT relname text, OUT att_list 
text[], OUT has_row_filter boolean) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_repset_table_info'; CREATE FUNCTION pglogical.show_subscription_table(subscription_name name, relation regclass, OUT nspname text, OUT relname text, OUT status text) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_table'; CREATE TABLE pglogical.queue ( queued_at timestamp with time zone NOT NULL, role name NOT NULL, replication_sets text[], message_type "char" NOT NULL, message json NOT NULL ); CREATE FUNCTION pglogical.replicate_ddl_command(command text, replication_sets text[] DEFAULT '{ddl_sql}') RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replicate_ddl_command'; CREATE OR REPLACE FUNCTION pglogical.queue_truncate() RETURNS trigger LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_queue_truncate'; CREATE FUNCTION pglogical.pglogical_node_info(OUT node_id oid, OUT node_name text, OUT sysid text, OUT dbname text, OUT replication_sets text) RETURNS record STABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.pglogical_gen_slot_name(name, name, name) RETURNS name IMMUTABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version() RETURNS text LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version_num() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_max_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_min_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.wait_slot_confirm_lsn(slotname name, target pg_lsn) RETURNS void LANGUAGE c AS 'pglogical','pglogical_wait_slot_confirm_lsn'; CREATE FUNCTION pglogical.wait_for_subscription_sync_complete(subscription_name name) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_subscription_sync_complete'; CREATE FUNCTION 
pglogical.wait_for_table_sync_complete(subscription_name name, relation regclass) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_table_sync_complete'; CREATE FUNCTION pglogical.xact_commit_timestamp_origin("xid" xid, OUT "timestamp" timestamptz, OUT "roident" oid) RETURNS record RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_xact_commit_timestamp_origin'; pglogical-REL2_4_1/pglogical--2.4.0--2.4.1.sql000066400000000000000000000000001415142317000200630ustar00rootroot00000000000000pglogical-REL2_4_1/pglogical--2.4.0.sql000066400000000000000000000303131415142317000173600ustar00rootroot00000000000000\echo Use "CREATE EXTENSION pglogical" to load this file. \quit CREATE TABLE pglogical.node ( node_id oid NOT NULL PRIMARY KEY, node_name name NOT NULL UNIQUE ) WITH (user_catalog_table=true); CREATE TABLE pglogical.node_interface ( if_id oid NOT NULL PRIMARY KEY, if_name name NOT NULL, -- default same as node name if_nodeid oid REFERENCES node(node_id), if_dsn text NOT NULL, UNIQUE (if_nodeid, if_name) ); CREATE TABLE pglogical.local_node ( node_id oid PRIMARY KEY REFERENCES node(node_id), node_local_interface oid NOT NULL REFERENCES node_interface(if_id) ); CREATE TABLE pglogical.subscription ( sub_id oid NOT NULL PRIMARY KEY, sub_name name NOT NULL UNIQUE, sub_origin oid NOT NULL REFERENCES node(node_id), sub_target oid NOT NULL REFERENCES node(node_id), sub_origin_if oid NOT NULL REFERENCES node_interface(if_id), sub_target_if oid NOT NULL REFERENCES node_interface(if_id), sub_enabled boolean NOT NULL DEFAULT true, sub_slot_name name NOT NULL, sub_replication_sets text[], sub_forward_origins text[], sub_apply_delay interval NOT NULL DEFAULT '0', sub_force_text_transfer boolean NOT NULL DEFAULT 'f' ); CREATE TABLE pglogical.local_sync_status ( sync_kind "char" NOT NULL CHECK (sync_kind IN ('i', 's', 'd', 'f')), sync_subid oid NOT NULL REFERENCES pglogical.subscription(sub_id), 
sync_nspname name, sync_relname name, sync_status "char" NOT NULL, sync_statuslsn pg_lsn NOT NULL, UNIQUE (sync_subid, sync_nspname, sync_relname) ); CREATE FUNCTION pglogical.create_node(node_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_node'; CREATE FUNCTION pglogical.drop_node(node_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_node'; CREATE FUNCTION pglogical.alter_node_add_interface(node_name name, interface_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_add_interface'; CREATE FUNCTION pglogical.alter_node_drop_interface(node_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_drop_interface'; CREATE FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[] = '{default,default_insert_only,ddl_sql}', synchronize_structure boolean = false, synchronize_data boolean = true, forward_origins text[] = '{all}', apply_delay interval DEFAULT '0', force_text_transfer boolean = false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_subscription'; CREATE FUNCTION pglogical.drop_subscription(subscription_name name, ifexists boolean DEFAULT false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_subscription'; CREATE FUNCTION pglogical.alter_subscription_interface(subscription_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_interface'; CREATE FUNCTION pglogical.alter_subscription_disable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_disable'; CREATE FUNCTION pglogical.alter_subscription_enable(subscription_name name, immediate boolean 
DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_enable'; CREATE FUNCTION pglogical.alter_subscription_add_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_add_replication_set'; CREATE FUNCTION pglogical.alter_subscription_remove_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_remove_replication_set'; CREATE FUNCTION pglogical.show_subscription_status(subscription_name name DEFAULT NULL, OUT subscription_name text, OUT status text, OUT provider_node text, OUT provider_dsn text, OUT slot_name text, OUT replication_sets text[], OUT forward_origins text[]) RETURNS SETOF record STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_status'; CREATE TABLE pglogical.replication_set ( set_id oid NOT NULL PRIMARY KEY, set_nodeid oid NOT NULL, set_name name NOT NULL, replicate_insert boolean NOT NULL DEFAULT true, replicate_update boolean NOT NULL DEFAULT true, replicate_delete boolean NOT NULL DEFAULT true, replicate_truncate boolean NOT NULL DEFAULT true, UNIQUE (set_nodeid, set_name) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_table ( set_id oid NOT NULL, set_reloid regclass NOT NULL, set_att_list text[], set_row_filter pg_node_tree, PRIMARY KEY(set_id, set_reloid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_seq ( set_id oid NOT NULL, set_seqoid regclass NOT NULL, PRIMARY KEY(set_id, set_seqoid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.sequence_state ( seqoid oid NOT NULL PRIMARY KEY, cache_size integer NOT NULL, last_value bigint NOT NULL ) WITH (user_catalog_table=true); CREATE TABLE pglogical.depend ( classid oid NOT NULL, objid oid NOT NULL, objsubid integer NOT NULL, refclassid oid NOT NULL, refobjid oid NOT 
NULL, refobjsubid integer NOT NULL, deptype "char" NOT NULL ) WITH (user_catalog_table=true); CREATE VIEW pglogical.TABLES AS WITH set_relations AS ( SELECT s.set_name, r.set_reloid FROM pglogical.replication_set_table r, pglogical.replication_set s, pglogical.local_node n WHERE s.set_nodeid = n.node_id AND s.set_id = r.set_id ), user_tables AS ( SELECT r.oid, n.nspname, r.relname, r.relreplident FROM pg_catalog.pg_class r, pg_catalog.pg_namespace n WHERE r.relkind = 'r' AND r.relpersistence = 'p' AND n.oid = r.relnamespace AND n.nspname !~ '^pg_' AND n.nspname != 'information_schema' AND n.nspname != 'pglogical' ) SELECT r.oid AS relid, n.nspname, r.relname, s.set_name FROM pg_catalog.pg_namespace n, pg_catalog.pg_class r, set_relations s WHERE r.relkind = 'r' AND n.oid = r.relnamespace AND r.oid = s.set_reloid UNION SELECT t.oid AS relid, t.nspname, t.relname, NULL FROM user_tables t WHERE t.oid NOT IN (SELECT set_reloid FROM set_relations); CREATE FUNCTION pglogical.create_replication_set(set_name name, replicate_insert boolean = true, replicate_update boolean = true, replicate_delete boolean = true, replicate_truncate boolean = true) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_replication_set'; CREATE FUNCTION pglogical.alter_replication_set(set_name name, replicate_insert boolean DEFAULT NULL, replicate_update boolean DEFAULT NULL, replicate_delete boolean DEFAULT NULL, replicate_truncate boolean DEFAULT NULL) RETURNS oid CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_replication_set'; CREATE FUNCTION pglogical.drop_replication_set(set_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_replication_set'; CREATE FUNCTION pglogical.replication_set_add_table(set_name name, relation regclass, synchronize_data boolean DEFAULT false, columns text[] DEFAULT NULL, row_filter text DEFAULT NULL) RETURNS boolean CALLED ON NULL INPUT 
VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_table'; CREATE FUNCTION pglogical.replication_set_add_all_tables(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_tables'; CREATE FUNCTION pglogical.replication_set_remove_table(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_table'; CREATE FUNCTION pglogical.replication_set_add_sequence(set_name name, relation regclass, synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_sequence'; CREATE FUNCTION pglogical.replication_set_add_all_sequences(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_sequences'; CREATE FUNCTION pglogical.replication_set_remove_sequence(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_sequence'; CREATE FUNCTION pglogical.alter_subscription_synchronize(subscription_name name, truncate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_synchronize'; CREATE FUNCTION pglogical.alter_subscription_resynchronize_table(subscription_name name, relation regclass, truncate boolean DEFAULT true) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_resynchronize_table'; CREATE FUNCTION pglogical.synchronize_sequence(relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_synchronize_sequence'; CREATE FUNCTION pglogical.table_data_filtered(reltyp anyelement, relation regclass, repsets text[]) RETURNS SETOF anyelement CALLED ON NULL INPUT 
STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_table_data_filtered'; CREATE FUNCTION pglogical.show_repset_table_info(relation regclass, repsets text[], OUT relid oid, OUT nspname text, OUT relname text, OUT att_list text[], OUT has_row_filter boolean) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_repset_table_info'; CREATE FUNCTION pglogical.show_subscription_table(subscription_name name, relation regclass, OUT nspname text, OUT relname text, OUT status text) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_table'; CREATE TABLE pglogical.queue ( queued_at timestamp with time zone NOT NULL, role name NOT NULL, replication_sets text[], message_type "char" NOT NULL, message json NOT NULL ); CREATE FUNCTION pglogical.replicate_ddl_command(command text, replication_sets text[] DEFAULT '{ddl_sql}') RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replicate_ddl_command'; CREATE OR REPLACE FUNCTION pglogical.queue_truncate() RETURNS trigger LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_queue_truncate'; CREATE FUNCTION pglogical.pglogical_node_info(OUT node_id oid, OUT node_name text, OUT sysid text, OUT dbname text, OUT replication_sets text) RETURNS record STABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.pglogical_gen_slot_name(name, name, name) RETURNS name IMMUTABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version() RETURNS text LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version_num() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_max_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_min_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.wait_slot_confirm_lsn(slotname name, target pg_lsn) RETURNS void LANGUAGE c AS 'pglogical','pglogical_wait_slot_confirm_lsn'; CREATE FUNCTION 
pglogical.wait_for_subscription_sync_complete(subscription_name name) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_subscription_sync_complete'; CREATE FUNCTION pglogical.wait_for_table_sync_complete(subscription_name name, relation regclass) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_table_sync_complete'; CREATE FUNCTION pglogical.xact_commit_timestamp_origin("xid" xid, OUT "timestamp" timestamptz, OUT "roident" oid) RETURNS record RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_xact_commit_timestamp_origin'; pglogical-REL2_4_1/pglogical--2.4.1.sql000066400000000000000000000303131415142317000173610ustar00rootroot00000000000000\echo Use "CREATE EXTENSION pglogical" to load this file. \quit CREATE TABLE pglogical.node ( node_id oid NOT NULL PRIMARY KEY, node_name name NOT NULL UNIQUE ) WITH (user_catalog_table=true); CREATE TABLE pglogical.node_interface ( if_id oid NOT NULL PRIMARY KEY, if_name name NOT NULL, -- default same as node name if_nodeid oid REFERENCES node(node_id), if_dsn text NOT NULL, UNIQUE (if_nodeid, if_name) ); CREATE TABLE pglogical.local_node ( node_id oid PRIMARY KEY REFERENCES node(node_id), node_local_interface oid NOT NULL REFERENCES node_interface(if_id) ); CREATE TABLE pglogical.subscription ( sub_id oid NOT NULL PRIMARY KEY, sub_name name NOT NULL UNIQUE, sub_origin oid NOT NULL REFERENCES node(node_id), sub_target oid NOT NULL REFERENCES node(node_id), sub_origin_if oid NOT NULL REFERENCES node_interface(if_id), sub_target_if oid NOT NULL REFERENCES node_interface(if_id), sub_enabled boolean NOT NULL DEFAULT true, sub_slot_name name NOT NULL, sub_replication_sets text[], sub_forward_origins text[], sub_apply_delay interval NOT NULL DEFAULT '0', sub_force_text_transfer boolean NOT NULL DEFAULT 'f' ); CREATE TABLE pglogical.local_sync_status ( sync_kind "char" NOT NULL CHECK (sync_kind IN ('i', 
's', 'd', 'f')), sync_subid oid NOT NULL REFERENCES pglogical.subscription(sub_id), sync_nspname name, sync_relname name, sync_status "char" NOT NULL, sync_statuslsn pg_lsn NOT NULL, UNIQUE (sync_subid, sync_nspname, sync_relname) ); CREATE FUNCTION pglogical.create_node(node_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_node'; CREATE FUNCTION pglogical.drop_node(node_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_node'; CREATE FUNCTION pglogical.alter_node_add_interface(node_name name, interface_name name, dsn text) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_add_interface'; CREATE FUNCTION pglogical.alter_node_drop_interface(node_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_node_drop_interface'; CREATE FUNCTION pglogical.create_subscription(subscription_name name, provider_dsn text, replication_sets text[] = '{default,default_insert_only,ddl_sql}', synchronize_structure boolean = false, synchronize_data boolean = true, forward_origins text[] = '{all}', apply_delay interval DEFAULT '0', force_text_transfer boolean = false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_subscription'; CREATE FUNCTION pglogical.drop_subscription(subscription_name name, ifexists boolean DEFAULT false) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_subscription'; CREATE FUNCTION pglogical.alter_subscription_interface(subscription_name name, interface_name name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_interface'; CREATE FUNCTION pglogical.alter_subscription_disable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_disable'; CREATE 
FUNCTION pglogical.alter_subscription_enable(subscription_name name, immediate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_enable'; CREATE FUNCTION pglogical.alter_subscription_add_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_add_replication_set'; CREATE FUNCTION pglogical.alter_subscription_remove_replication_set(subscription_name name, replication_set name) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_remove_replication_set'; CREATE FUNCTION pglogical.show_subscription_status(subscription_name name DEFAULT NULL, OUT subscription_name text, OUT status text, OUT provider_node text, OUT provider_dsn text, OUT slot_name text, OUT replication_sets text[], OUT forward_origins text[]) RETURNS SETOF record STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_status'; CREATE TABLE pglogical.replication_set ( set_id oid NOT NULL PRIMARY KEY, set_nodeid oid NOT NULL, set_name name NOT NULL, replicate_insert boolean NOT NULL DEFAULT true, replicate_update boolean NOT NULL DEFAULT true, replicate_delete boolean NOT NULL DEFAULT true, replicate_truncate boolean NOT NULL DEFAULT true, UNIQUE (set_nodeid, set_name) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_table ( set_id oid NOT NULL, set_reloid regclass NOT NULL, set_att_list text[], set_row_filter pg_node_tree, PRIMARY KEY(set_id, set_reloid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.replication_set_seq ( set_id oid NOT NULL, set_seqoid regclass NOT NULL, PRIMARY KEY(set_id, set_seqoid) ) WITH (user_catalog_table=true); CREATE TABLE pglogical.sequence_state ( seqoid oid NOT NULL PRIMARY KEY, cache_size integer NOT NULL, last_value bigint NOT NULL ) WITH (user_catalog_table=true); CREATE TABLE pglogical.depend ( classid oid NOT NULL, 
objid oid NOT NULL, objsubid integer NOT NULL, refclassid oid NOT NULL, refobjid oid NOT NULL, refobjsubid integer NOT NULL, deptype "char" NOT NULL ) WITH (user_catalog_table=true); CREATE VIEW pglogical.TABLES AS WITH set_relations AS ( SELECT s.set_name, r.set_reloid FROM pglogical.replication_set_table r, pglogical.replication_set s, pglogical.local_node n WHERE s.set_nodeid = n.node_id AND s.set_id = r.set_id ), user_tables AS ( SELECT r.oid, n.nspname, r.relname, r.relreplident FROM pg_catalog.pg_class r, pg_catalog.pg_namespace n WHERE r.relkind = 'r' AND r.relpersistence = 'p' AND n.oid = r.relnamespace AND n.nspname !~ '^pg_' AND n.nspname != 'information_schema' AND n.nspname != 'pglogical' ) SELECT r.oid AS relid, n.nspname, r.relname, s.set_name FROM pg_catalog.pg_namespace n, pg_catalog.pg_class r, set_relations s WHERE r.relkind = 'r' AND n.oid = r.relnamespace AND r.oid = s.set_reloid UNION SELECT t.oid AS relid, t.nspname, t.relname, NULL FROM user_tables t WHERE t.oid NOT IN (SELECT set_reloid FROM set_relations); CREATE FUNCTION pglogical.create_replication_set(set_name name, replicate_insert boolean = true, replicate_update boolean = true, replicate_delete boolean = true, replicate_truncate boolean = true) RETURNS oid STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_create_replication_set'; CREATE FUNCTION pglogical.alter_replication_set(set_name name, replicate_insert boolean DEFAULT NULL, replicate_update boolean DEFAULT NULL, replicate_delete boolean DEFAULT NULL, replicate_truncate boolean DEFAULT NULL) RETURNS oid CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_replication_set'; CREATE FUNCTION pglogical.drop_replication_set(set_name name, ifexists boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_drop_replication_set'; CREATE FUNCTION pglogical.replication_set_add_table(set_name name, relation regclass, synchronize_data boolean DEFAULT false, columns 
text[] DEFAULT NULL, row_filter text DEFAULT NULL) RETURNS boolean CALLED ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_table'; CREATE FUNCTION pglogical.replication_set_add_all_tables(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_tables'; CREATE FUNCTION pglogical.replication_set_remove_table(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_table'; CREATE FUNCTION pglogical.replication_set_add_sequence(set_name name, relation regclass, synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_sequence'; CREATE FUNCTION pglogical.replication_set_add_all_sequences(set_name name, schema_names text[], synchronize_data boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_add_all_sequences'; CREATE FUNCTION pglogical.replication_set_remove_sequence(set_name name, relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replication_set_remove_sequence'; CREATE FUNCTION pglogical.alter_subscription_synchronize(subscription_name name, truncate boolean DEFAULT false) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_synchronize'; CREATE FUNCTION pglogical.alter_subscription_resynchronize_table(subscription_name name, relation regclass, truncate boolean DEFAULT true) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_alter_subscription_resynchronize_table'; CREATE FUNCTION pglogical.synchronize_sequence(relation regclass) RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_synchronize_sequence'; CREATE FUNCTION pglogical.table_data_filtered(reltyp 
anyelement, relation regclass, repsets text[]) RETURNS SETOF anyelement CALLED ON NULL INPUT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_table_data_filtered'; CREATE FUNCTION pglogical.show_repset_table_info(relation regclass, repsets text[], OUT relid oid, OUT nspname text, OUT relname text, OUT att_list text[], OUT has_row_filter boolean) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_repset_table_info'; CREATE FUNCTION pglogical.show_subscription_table(subscription_name name, relation regclass, OUT nspname text, OUT relname text, OUT status text) RETURNS record STRICT STABLE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_show_subscription_table'; CREATE TABLE pglogical.queue ( queued_at timestamp with time zone NOT NULL, role name NOT NULL, replication_sets text[], message_type "char" NOT NULL, message json NOT NULL ); CREATE FUNCTION pglogical.replicate_ddl_command(command text, replication_sets text[] DEFAULT '{ddl_sql}') RETURNS boolean STRICT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_replicate_ddl_command'; CREATE OR REPLACE FUNCTION pglogical.queue_truncate() RETURNS trigger LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_queue_truncate'; CREATE FUNCTION pglogical.pglogical_node_info(OUT node_id oid, OUT node_name text, OUT sysid text, OUT dbname text, OUT replication_sets text) RETURNS record STABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.pglogical_gen_slot_name(name, name, name) RETURNS name IMMUTABLE STRICT LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version() RETURNS text LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_version_num() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_max_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical_min_proto_version() RETURNS integer LANGUAGE c AS 'MODULE_PATHNAME'; CREATE FUNCTION pglogical.wait_slot_confirm_lsn(slotname name, target pg_lsn) RETURNS void 
LANGUAGE c AS 'pglogical','pglogical_wait_slot_confirm_lsn'; CREATE FUNCTION pglogical.wait_for_subscription_sync_complete(subscription_name name) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_subscription_sync_complete'; CREATE FUNCTION pglogical.wait_for_table_sync_complete(subscription_name name, relation regclass) RETURNS void RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_wait_for_table_sync_complete'; CREATE FUNCTION pglogical.xact_commit_timestamp_origin("xid" xid, OUT "timestamp" timestamptz, OUT "roident" oid) RETURNS record RETURNS NULL ON NULL INPUT VOLATILE LANGUAGE c AS 'MODULE_PATHNAME', 'pglogical_xact_commit_timestamp_origin'; pglogical-REL2_4_1/pglogical.c000066400000000000000000000547711415142317000163050ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical.c * pglogical initialization and common functionality * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "access/hash.h" #include "access/htup_details.h" #include "access/xact.h" #include "access/xlog.h" #include "catalog/pg_extension.h" #include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/pg_database.h" #include "catalog/pg_type.h" #include "commands/extension.h" #include "commands/trigger.h" #include "executor/executor.h" #include "mb/pg_wchar.h" #include "nodes/nodeFuncs.h" #include "optimizer/planner.h" #include "parser/parse_coerce.h" #include "replication/reorderbuffer.h" #include "storage/ipc.h" #include "storage/proc.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/snapmgr.h" #include "pgstat.h" #include "pglogical_executor.h" #include "pglogical_node.h" 
#include "pglogical_conflict.h" #include "pglogical_worker.h" #include "pglogical.h" PG_MODULE_MAGIC; static const struct config_enum_entry PGLogicalConflictResolvers[] = { {"error", PGLOGICAL_RESOLVE_ERROR, false}, #ifndef XCP {"apply_remote", PGLOGICAL_RESOLVE_APPLY_REMOTE, false}, {"keep_local", PGLOGICAL_RESOLVE_KEEP_LOCAL, false}, {"last_update_wins", PGLOGICAL_RESOLVE_LAST_UPDATE_WINS, false}, {"first_update_wins", PGLOGICAL_RESOLVE_FIRST_UPDATE_WINS, false}, #endif {NULL, 0, false} }; /* copied fom guc.c */ static const struct config_enum_entry server_message_level_options[] = { {"debug", DEBUG2, true}, {"debug5", DEBUG5, false}, {"debug4", DEBUG4, false}, {"debug3", DEBUG3, false}, {"debug2", DEBUG2, false}, {"debug1", DEBUG1, false}, {"info", INFO, false}, {"notice", NOTICE, false}, {"warning", WARNING, false}, {"error", ERROR, false}, {"log", LOG, false}, {"fatal", FATAL, false}, {"panic", PANIC, false}, {NULL, 0, false} }; bool pglogical_synchronous_commit = false; char *pglogical_temp_directory = ""; bool pglogical_use_spi = false; bool pglogical_batch_inserts = true; static char *pglogical_temp_directory_config; void _PG_init(void); void pglogical_supervisor_main(Datum main_arg); char *pglogical_extra_connection_options; static PGconn * pglogical_connect_base(const char *connstr, const char *appname, const char *suffix, bool replication); /* * Ensure string is not longer than maxlen. * * The way we do this is we if the string is longer we return prefix from that * string and hash of the string which will together be exatly maxlen. * * Maxlen can't be less than 8 because hash produces uint32 which in hex form * can have up to 8 characters. 
*/ char * shorten_hash(const char *str, int maxlen) { char *ret; int len = strlen(str); Assert(maxlen >= 8); if (len <= maxlen) return pstrdup(str); ret = (char *) palloc(maxlen + 1); snprintf(ret, maxlen, "%.*s%08x", maxlen - 8, str, DatumGetUInt32(hash_any((unsigned char *) str, len))); ret[maxlen] = '\0'; return ret; } /* * Convert text array to list of strings. * * Note: the resulting list points to the memory of the input array. */ List * textarray_to_list(ArrayType *textarray) { Datum *elems; int nelems, i; List *res = NIL; deconstruct_array(textarray, TEXTOID, -1, false, 'i', &elems, NULL, &nelems); if (nelems == 0) return NIL; for (i = 0; i < nelems; i++) res = lappend(res, TextDatumGetCString(elems[i])); return res; } /* * Deconstruct the text representation of a 1-dimensional Postgres array * into individual items. * * On success, returns true and sets *itemarray and *nitems to describe * an array of individual strings. On parse failure, returns false; * *itemarray may exist or be NULL. * * NOTE: free'ing itemarray is sufficient to deallocate the working storage. */ bool parsePGArray(const char *atext, char ***itemarray, int *nitems) { int inputlen; char **items; char *strings; int curitem; /* * We expect input in the form of "{item,item,item}" where any item is * either raw data, or surrounded by double quotes (in which case embedded * characters including backslashes and quotes are backslashed). * * We build the result as an array of pointers followed by the actual * string data, all in one malloc block for convenience of deallocation. * The worst-case storage need is not more than one pointer and one * character for each input character (consider "{,,,,,,,,,,}"). 
*/ *itemarray = NULL; *nitems = 0; inputlen = strlen(atext); if (inputlen < 2 || atext[0] != '{' || atext[inputlen - 1] != '}') return false; /* bad input */ items = (char **) malloc(inputlen * (sizeof(char *) + sizeof(char))); if (items == NULL) return false; /* out of memory */ *itemarray = items; strings = (char *) (items + inputlen); atext++; /* advance over initial '{' */ curitem = 0; while (*atext != '}') { if (*atext == '\0') return false; /* premature end of string */ items[curitem] = strings; while (*atext != '}' && *atext != ',') { if (*atext == '\0') return false; /* premature end of string */ if (*atext != '"') *strings++ = *atext++; /* copy unquoted data */ else { /* process quoted substring */ atext++; while (*atext != '"') { if (*atext == '\0') return false; /* premature end of string */ if (*atext == '\\') { atext++; if (*atext == '\0') return false; /* premature end of string */ } *strings++ = *atext++; /* copy quoted data */ } atext++; } } *strings++ = '\0'; if (*atext == ',') atext++; curitem++; } if (atext[1] != '\0') return false; /* bogus syntax (embedded '}') */ *nitems = curitem; return true; } /* * Get oid of our queue table. 
*/ inline Oid get_pglogical_table_oid(const char *table) { Oid nspoid; Oid reloid; nspoid = get_namespace_oid(EXTENSION_NAME, false); reloid = get_relname_relid(table, nspoid); if (reloid == InvalidOid) elog(ERROR, "cache lookup failed for relation %s.%s", EXTENSION_NAME, table); return reloid; } #define CONN_PARAM_ARRAY_SIZE 9 static PGconn * pglogical_connect_base(const char *connstr, const char *appname, const char *suffix, bool replication) { int i=0; PGconn *conn; const char *keys[CONN_PARAM_ARRAY_SIZE]; const char *vals[CONN_PARAM_ARRAY_SIZE]; StringInfoData s; initStringInfo(&s); appendStringInfoString(&s, pglogical_extra_connection_options); appendStringInfoChar(&s, ' '); appendStringInfoString(&s, connstr); keys[i] = "dbname"; vals[i] = connstr; i++; keys[i] = "application_name"; if (suffix) { char s[NAMEDATALEN]; snprintf(s, NAMEDATALEN, "%s_%s", shorten_hash(appname, NAMEDATALEN - strlen(suffix) - 2), suffix); vals[i] = s; } else vals[i] = appname; i++; keys[i] = "connect_timeout"; vals[i] = "30"; i++; keys[i] = "keepalives"; vals[i] = "1"; i++; keys[i] = "keepalives_idle"; vals[i] = "20"; i++; keys[i] = "keepalives_interval"; vals[i] = "20"; i++; keys[i] = "keepalives_count"; vals[i] = "5"; i++; keys[i] = "replication"; vals[i] = replication ? "database" : NULL; i++; keys[i] = NULL; vals[i] = NULL; Assert(i <= CONN_PARAM_ARRAY_SIZE); /* * We use the expand_dbname parameter to process the connection string * (or URI), and pass some extra options. */ conn = PQconnectdbParams(keys, vals, /* expand_dbname = */ true); if (PQstatus(conn) != CONNECTION_OK) { ereport(ERROR, (errmsg("could not connect to the postgresql server%s: %s", replication ? " in replication mode" : "", PQerrorMessage(conn)), errdetail("dsn was: %s", s.data))); } resetStringInfo(&s); return conn; } /* * Make standard postgres connection, ERROR on failure. 
*/ PGconn * pglogical_connect(const char *connstring, const char *connname, const char *suffix) { return pglogical_connect_base(connstring, connname, suffix, false); } /* * Make replication connection, ERROR on failure. */ PGconn * pglogical_connect_replica(const char *connstring, const char *connname, const char *suffix) { return pglogical_connect_base(connstring, connname, suffix, true); } /* * Make sure the extension is up to date. * * Called by db manager. */ void pglogical_manage_extension(void) { Relation extrel; SysScanDesc scandesc; HeapTuple tuple; ScanKeyData key[1]; if (RecoveryInProgress()) return; PushActiveSnapshot(GetTransactionSnapshot()); /* make sure we're operating without other pglogical workers interfering */ extrel = table_open(ExtensionRelationId, ShareUpdateExclusiveLock); ScanKeyInit(&key[0], Anum_pg_extension_extname, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(EXTENSION_NAME)); scandesc = systable_beginscan(extrel, ExtensionNameIndexId, true, NULL, 1, key); tuple = systable_getnext(scandesc); /* No extension, nothing to update. */ if (HeapTupleIsValid(tuple)) { Datum datum; bool isnull; char *extversion; /* Determine extension version. */ datum = heap_getattr(tuple, Anum_pg_extension_extversion, RelationGetDescr(extrel), &isnull); if (isnull) elog(ERROR, "extversion is null"); extversion = text_to_cstring(DatumGetTextPP(datum)); /* Only run the alter if the versions don't match. */ if (strcmp(extversion, PGLOGICAL_VERSION) != 0) { AlterExtensionStmt alter_stmt; alter_stmt.options = NIL; alter_stmt.extname = EXTENSION_NAME; ExecAlterExtensionStmt(&alter_stmt); } } systable_endscan(scandesc); table_close(extrel, NoLock); PopActiveSnapshot(); } /* * Call IDENTIFY_SYSTEM on the connection and report its results. 
*/ void pglogical_identify_system(PGconn *streamConn, uint64* sysid, TimeLineID *timeline, XLogRecPtr *xlogpos, Name *dbname) { PGresult *res; res = PQexec(streamConn, "IDENTIFY_SYSTEM"); if (PQresultStatus(res) != PGRES_TUPLES_OK) { elog(ERROR, "could not send replication command \"%s\": %s", "IDENTIFY_SYSTEM", PQerrorMessage(streamConn)); } if (PQntuples(res) != 1 || PQnfields(res) < 4) { elog(ERROR, "could not identify system: got %d rows and %d fields, expected %d rows and at least %d fields\n", PQntuples(res), PQnfields(res), 1, 4); } if (PQnfields(res) > 4) { elog(DEBUG2, "ignoring extra fields in IDENTIFY_SYSTEM response; expected 4, got %d", PQnfields(res)); } if (sysid != NULL) { const char *remote_sysid = PQgetvalue(res, 0, 0); if (sscanf(remote_sysid, UINT64_FORMAT, sysid) != 1) elog(ERROR, "could not parse remote sysid %s", remote_sysid); } if (timeline != NULL) { const char *remote_tlid = PQgetvalue(res, 0, 1); if (sscanf(remote_tlid, "%u", timeline) != 1) elog(ERROR, "could not parse remote tlid %s", remote_tlid); } if (xlogpos != NULL) { const char *remote_xlogpos = PQgetvalue(res, 0, 2); uint32 xlogpos_low, xlogpos_high; if (sscanf(remote_xlogpos, "%X/%X", &xlogpos_high, &xlogpos_low) != 2) elog(ERROR, "could not parse remote xlogpos %s", remote_xlogpos); *xlogpos = (((XLogRecPtr)xlogpos_high)<<32) + xlogpos_low; } if (dbname != NULL) { char *remote_dbname = PQgetvalue(res, 0, 3); strncpy(NameStr(**dbname), remote_dbname, NAMEDATALEN); NameStr(**dbname)[NAMEDATALEN-1] = '\0'; } PQclear(res); } void pglogical_start_replication(PGconn *streamConn, const char *slot_name, XLogRecPtr start_pos, const char *forward_origins, const char *replication_sets, const char *replicate_only_table, bool force_text_transfer) { StringInfoData command; PGresult *res; char *sqlstate; const char *want_binary = (force_text_transfer ? 
"0" : "1"); initStringInfo(&command); appendStringInfo(&command, "START_REPLICATION SLOT \"%s\" LOGICAL %X/%X (", slot_name, (uint32) (start_pos >> 32), (uint32) start_pos); /* Basic protocol info. */ appendStringInfo(&command, "expected_encoding '%s'", GetDatabaseEncodingName()); appendStringInfo(&command, ", min_proto_version '%d'", PGLOGICAL_MIN_PROTO_VERSION_NUM); appendStringInfo(&command, ", max_proto_version '%d'", PGLOGICAL_MAX_PROTO_VERSION_NUM); appendStringInfo(&command, ", startup_params_format '1'"); /* Binary protocol compatibility. */ appendStringInfo(&command, ", \"binary.want_internal_basetypes\" '%s'", want_binary); appendStringInfo(&command, ", \"binary.want_binary_basetypes\" '%s'", want_binary); appendStringInfo(&command, ", \"binary.basetypes_major_version\" '%u'", PG_VERSION_NUM/100); appendStringInfo(&command, ", \"binary.sizeof_datum\" '%zu'", sizeof(Datum)); appendStringInfo(&command, ", \"binary.sizeof_int\" '%zu'", sizeof(int)); appendStringInfo(&command, ", \"binary.sizeof_long\" '%zu'", sizeof(long)); appendStringInfo(&command, ", \"binary.bigendian\" '%d'", #ifdef WORDS_BIGENDIAN true #else false #endif ); appendStringInfo(&command, ", \"binary.float4_byval\" '%d'", #ifdef USE_FLOAT4_BYVAL true #else false #endif ); appendStringInfo(&command, ", \"binary.float8_byval\" '%d'", #ifdef USE_FLOAT8_BYVAL true #else false #endif ); appendStringInfo(&command, ", \"binary.integer_datetimes\" '%d'", #ifdef USE_INTEGER_DATETIMES true #else false #endif ); /* We don't care about this anymore but pglogical 1.x expects this. 
*/ appendStringInfoString(&command, ", \"hooks.setup_function\" 'pglogical.pglogical_hooks_setup'"); if (forward_origins) appendStringInfo(&command, ", \"pglogical.forward_origins\" %s", quote_literal_cstr(forward_origins)); if (replicate_only_table) { /* Send the table name we want to the upstream */ appendStringInfoString(&command, ", \"pglogical.replicate_only_table\" "); appendStringInfoString(&command, quote_literal_cstr(replicate_only_table)); } if (replication_sets) { /* Send the replication set names we want to the upstream */ appendStringInfoString(&command, ", \"pglogical.replication_set_names\" "); appendStringInfoString(&command, quote_literal_cstr(replication_sets)); } /* Tell the upstream that we want unbounded metadata cache size */ appendStringInfoString(&command, ", \"relmeta_cache_size\" '-1'"); /* general info about the downstream */ appendStringInfo(&command, ", pg_version '%u'", PG_VERSION_NUM); appendStringInfo(&command, ", pglogical_version '%s'", PGLOGICAL_VERSION); appendStringInfo(&command, ", pglogical_version_num '%d'", PGLOGICAL_VERSION_NUM); appendStringInfo(&command, ", pglogical_apply_pid '%d'", MyProcPid); appendStringInfoChar(&command, ')'); res = PQexec(streamConn, command.data); sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE); if (PQresultStatus(res) != PGRES_COPY_BOTH) elog(FATAL, "could not send replication command \"%s\": %s\n, sqlstate: %s", command.data, PQresultErrorMessage(res), sqlstate); PQclear(res); } /* * Start the manager workers for every db which has a pglogical node. * * Note that we start workers that are not necessary here. We do this because * we need to check every individual database to check if there is pglogical * node setup and it's not possible to switch connections to different * databases within one background worker. The workers that won't find any * pglogical node setup will exit immediately during startup. 
* This behavior can cause issue where we consume all the allowed workers and * eventually error out even though the max_worker_processes is set high enough * to satisfy the actual needed worker count. * * Must be run inside a transaction. */ static void start_manager_workers(void) { Relation rel; TableScanDesc scan; HeapTuple tup; /* Run manager worker for every connectable database. */ rel = table_open(DatabaseRelationId, AccessShareLock); scan = table_beginscan_catalog(rel, 0, NULL); while (HeapTupleIsValid(tup = heap_getnext(scan, ForwardScanDirection))) { Form_pg_database pgdatabase = (Form_pg_database) GETSTRUCT(tup); #if PG_VERSION_NUM < 120000 Oid dboid = HeapTupleGetOid(tup); #else Oid dboid = pgdatabase->oid; #endif PGLogicalWorker worker; CHECK_FOR_INTERRUPTS(); /* Can't run workers on databases which don't allow connection. */ if (!pgdatabase->datallowconn) continue; /* Worker already attached, nothing to do. */ LWLockAcquire(PGLogicalCtx->lock, LW_EXCLUSIVE); if (pglogical_worker_running(pglogical_manager_find(dboid))) { LWLockRelease(PGLogicalCtx->lock); continue; } LWLockRelease(PGLogicalCtx->lock); /* No record found, try running new worker. */ elog(DEBUG1, "registering pglogical manager process for database %s", NameStr(pgdatabase->datname)); memset(&worker, 0, sizeof(PGLogicalWorker)); worker.worker_type = PGLOGICAL_WORKER_MANAGER; worker.dboid = dboid; pglogical_worker_register(&worker); } table_endscan(scan); table_close(rel, AccessShareLock); } /* * Static bgworker used for initialization and management (our main process). */ void pglogical_supervisor_main(Datum main_arg) { /* Establish signal handlers. */ pqsignal(SIGTERM, handle_sigterm); BackgroundWorkerUnblockSignals(); /* * Initialize supervisor info in shared memory. Strictly speaking we * don't need a lock here, because no other process could possibly be * looking at this shared struct since they're all started by the * supervisor, but let's be safe. 
*/ LWLockAcquire(PGLogicalCtx->lock, LW_EXCLUSIVE); PGLogicalCtx->supervisor = MyProc; PGLogicalCtx->subscriptions_changed = true; LWLockRelease(PGLogicalCtx->lock); /* Make it easy to identify our processes. */ SetConfigOption("application_name", MyBgworkerEntry->bgw_name, PGC_USERSET, PGC_S_SESSION); elog(LOG, "starting pglogical supervisor"); VALGRIND_PRINTF("PGLOGICAL: supervisor\n"); /* Setup connection to pinned catalogs (we only ever read pg_database). */ #if PG_VERSION_NUM >= 110000 BackgroundWorkerInitializeConnection(NULL, NULL, 0); #elif PG_VERSION_NUM >= 90500 BackgroundWorkerInitializeConnection(NULL, NULL); #else BackgroundWorkerInitializeConnection("postgres", NULL); #endif /* Main wait loop. */ while (!got_SIGTERM) { int rc; if (PGLogicalCtx->subscriptions_changed) { /* * No need to lock here, since we'll take account of all sub * changes up to this point, even if new ones were added between * the test above and flag clear. We're just being woken up. */ PGLogicalCtx->subscriptions_changed = false; StartTransactionCommand(); start_manager_workers(); CommitTransactionCommand(); } rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, 180000L); ResetLatch(&MyProc->procLatch); /* emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) proc_exit(1); } VALGRIND_PRINTF("PGLOGICAL: supervisor exit\n"); proc_exit(0); } static void pglogical_temp_directory_assing_hook(const char *newval, void *extra) { if (strlen(newval)) { pglogical_temp_directory = strdup(newval); } else { #ifndef WIN32 const char *tmpdir = getenv("TMPDIR"); if (!tmpdir) tmpdir = "/tmp"; #else char tmpdir[MAXPGPATH]; int ret; ret = GetTempPath(MAXPGPATH, tmpdir); if (ret == 0 || ret > MAXPGPATH) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("could not locate temporary directory: %s\n", !ret ? 
strerror(errno) : ""))); return false; } #endif pglogical_temp_directory = strdup(tmpdir); } if (pglogical_temp_directory == NULL) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); } /* * Entry point for this module. */ void _PG_init(void) { BackgroundWorker bgw; if (!process_shared_preload_libraries_in_progress) elog(ERROR, "pglogical is not in shared_preload_libraries"); DefineCustomEnumVariable("pglogical.conflict_resolution", gettext_noop("Sets method used for conflict resolution for resolvable conflicts."), NULL, &pglogical_conflict_resolver, #ifdef XCP PGLOGICAL_RESOLVE_ERROR, #else PGLOGICAL_RESOLVE_APPLY_REMOTE, #endif PGLogicalConflictResolvers, PGC_SUSET, 0, pglogical_conflict_resolver_check_hook, NULL, NULL); DefineCustomEnumVariable("pglogical.conflict_log_level", gettext_noop("Sets log level used for logging resolved conflicts."), NULL, &pglogical_conflict_log_level, LOG, server_message_level_options, PGC_SUSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable("pglogical.synchronous_commit", "pglogical specific synchronous commit value", NULL, &pglogical_synchronous_commit, false, PGC_POSTMASTER, 0, NULL, NULL, NULL); DefineCustomBoolVariable("pglogical.use_spi", "Use SPI instead of low-level API for applying changes", NULL, &pglogical_use_spi, #ifdef XCP true, #else false, #endif PGC_POSTMASTER, 0, NULL, NULL, NULL); DefineCustomBoolVariable("pglogical.batch_inserts", "Batch inserts if possible", NULL, &pglogical_batch_inserts, true, PGC_POSTMASTER, 0, NULL, NULL, NULL); /* * We can't use the temp_tablespace safely for our dumps, because Pg's * crash recovery is very careful to delete only particularly formatted * files. Instead for now just allow user to specify dump storage. 
*/ DefineCustomStringVariable("pglogical.temp_directory", "Directory to store dumps for local restore", NULL, &pglogical_temp_directory_config, "", PGC_SIGHUP, 0, NULL, pglogical_temp_directory_assing_hook, NULL); DefineCustomStringVariable("pglogical.extra_connection_options", "connection options to add to all peer node connections", NULL, &pglogical_extra_connection_options, "", PGC_SIGHUP, 0, NULL, NULL, NULL); if (IsBinaryUpgrade) return; /* Init workers. */ pglogical_worker_shmem_init(); /* Init executor module */ pglogical_executor_init(); /* Run the supervisor. */ memset(&bgw, 0, sizeof(bgw)); bgw.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; bgw.bgw_start_time = BgWorkerStart_RecoveryFinished; snprintf(bgw.bgw_library_name, BGW_MAXLEN, EXTENSION_NAME); snprintf(bgw.bgw_function_name, BGW_MAXLEN, "pglogical_supervisor_main"); snprintf(bgw.bgw_name, BGW_MAXLEN, "pglogical supervisor"); bgw.bgw_restart_time = 5; RegisterBackgroundWorker(&bgw); } pglogical-REL2_4_1/pglogical.control.in000066400000000000000000000003051415142317000201300ustar00rootroot00000000000000# pglogical extension comment = 'PostgreSQL Logical Replication' default_version = '__PGLOGICAL_VERSION__' module_pathname = '$libdir/pglogical' relocatable = false __REQUIRES__ schema = pglogical pglogical-REL2_4_1/pglogical.h000066400000000000000000000066131415142317000163020ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical.h * pglogical replication plugin * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical.h * *------------------------------------------------------------------------- */ #ifndef PGLOGICAL_H #define PGLOGICAL_H #include "storage/s_lock.h" #include "postmaster/bgworker.h" #include "utils/array.h" #include "access/xlogdefs.h" #include "executor/executor.h" #include "libpq-fe.h" #include "pglogical_fe.h" #include "pglogical_node.h" #include 
"pglogical_compat.h" #define PGLOGICAL_VERSION "2.4.1" #define PGLOGICAL_VERSION_NUM 20401 #define PGLOGICAL_MIN_PROTO_VERSION_NUM 1 #define PGLOGICAL_MAX_PROTO_VERSION_NUM 1 #define EXTENSION_NAME "pglogical" #define REPLICATION_ORIGIN_ALL "all" #if PG_VERSION_NUM >= 90500 #define HAVE_REPLICATION_ORIGINS #endif extern bool pglogical_synchronous_commit; extern char *pglogical_temp_directory; extern bool pglogical_use_spi; extern bool pglogical_batch_inserts; extern char *pglogical_extra_connection_options; extern char *shorten_hash(const char *str, int maxlen); extern List *textarray_to_list(ArrayType *textarray); extern bool parsePGArray(const char *atext, char ***itemarray, int *nitems); extern Oid get_pglogical_table_oid(const char *table); extern void pglogical_execute_sql_command(char *cmdstr, char *role, bool isTopLevel); extern PGconn *pglogical_connect(const char *connstring, const char *connname, const char *suffix); extern PGconn *pglogical_connect_replica(const char *connstring, const char *connname, const char *suffix); extern void pglogical_identify_system(PGconn *streamConn, uint64* sysid, TimeLineID *timeline, XLogRecPtr *xlogpos, Name *dbname); extern void pglogical_start_replication(PGconn *streamConn, const char *slot_name, XLogRecPtr start_pos, const char *forward_origins, const char *replication_sets, const char *replicate_only_table, bool force_text_transfer); extern void pglogical_manage_extension(void); extern void apply_work(PGconn *streamConn); extern bool synchronize_sequences(void); extern void synchronize_sequence(Oid seqoid); extern void pglogical_create_sequence_state_record(Oid seqoid); extern void pglogical_drop_sequence_state_record(Oid seqoid); extern int64 sequence_get_last_value(Oid seqoid); extern bool in_pglogical_replicate_ddl_command; #include "utils/memdebug.h" /* * PostgreSQL exposes stubs for some Valgrind macros, but there are some * others we use that aren't supported by Pg proper yet. 
*/ #ifndef USE_VALGRIND #define VALGRIND_CHECK_VALUE_IS_DEFINED(v) do{} while(0) #define VALGRIND_DO_LEAK_CHECK do{} while(0) #define VALGRIND_DO_ADDED_LEAK_CHECK do{} while(0) #define VALGRIND_DO_CHANGED_LEAK_CHECK do{} while(0) #define VALGRIND_DO_QUICK_LEAK_CHECK do{} while(0) #define VALGRIND_DISABLE_ERROR_REPORTING do {} while (0) #define VALGRIND_ENABLE_ERROR_REPORTING do {} while (0) /* * Gives us some error checking when no-op'd. pglogical uses this to report * the worker type, etc, prefixed by PGLOGICAL:, in the Valgrind logs. We * need to stub it out if we aren't using valgrind. */ pg_attribute_printf(1, 2) pg_attribute_unused() static inline void VALGRIND_PRINTF(const char *format, ...) {} #endif #endif /* PGLOGICAL_H */ pglogical-REL2_4_1/pglogical.supp000066400000000000000000000222471415142317000170430ustar00rootroot00000000000000# We don't care about leaks in work done only once # during startup, so suppress them. We could possibly free # these cleanly, but ... meh. { pgl_apply_start_sync_sub Memcheck:Leak match-leak-kinds: possible,definite,reachable fun:*lloc* ... fun:pglogical_sync_subscription fun:pglogical_apply_main } { pgl_apply_start_getsub Memcheck:Leak match-leak-kinds: possible,definite,reachable fun:*lloc* ... fun:get_subscription fun:pglogical_apply_main } { pgl_apply_start_queuetable Memcheck:Leak match-leak-kinds: possible,definite,reachable fun:*lloc* ... fun:get_queue_table_oid fun:pglogical_apply_main } { pgl_apply_start_bgw Memcheck:Leak match-leak-kinds: possible,definite,reachable fun:*lloc* ... fun:BackgroundWorkerInitializeConnectionByOid fun:pglogical_apply_main } { pgl_apply_start_replorigin Memcheck:Leak match-leak-kinds: possible,definite,reachable fun:*lloc* ... fun:replorigin_by_name fun:pglogical_apply_main } { pgl_apply_start_resowner Memcheck:Leak match-leak-kinds: definite,definite,reachable fun:*lloc* ... 
fun:ResourceOwnerCreate fun:pglogical_apply_main } { pgl_bgw_init Memcheck:Leak match-leak-kinds: possible fun:malloc fun:RegisterBackgroundWorker fun:_PG_init } # # pglogical's relcache persists across txns. # # These allocations are in CacheMemoryContext. # { pgl_relcache_update Memcheck:Leak match-leak-kinds: possible fun:*lloc* ... fun:pglogical_relation_cache_update ... fun:apply_work } { pgl_relcache_open Memcheck:Leak match-leak-kinds: reachable fun:*lloc* ... fun:pglogical_relation_open } # # # These produce a lot of noise. They may be real leaks and need looking into, # but are suppressed for now while I look for other issues. Pg should complain # if we fail to close an opened relation. # { pgl_relation Memcheck:Leak match-leak-kinds: possible fun:*lloc* ... fun:relation_open } { pgl_relation Memcheck:Leak match-leak-kinds: possible fun:*lloc* ... fun:relation_open } # # XXX # # Valgrind keeps claiming that bms_copy escapes heap_update. I cannot find how # that could be happening. We'll suppress the report for now on the assumption # that anything happening this deep in Pg should be causing much greater upset # than in pgl, but it merits further digging. # { pgl_pg_bms_copy Memcheck:Leak match-leak-kinds: reachable fun:palloc fun:bms_copy fun:RelationGetIndexAttrBitmap fun:heap_update ... fun:standard_ProcessUtility fun:pglogical_ProcessUtility ... fun:handle_sql ... fun:apply_work } # Anything else in standard_ProcessUtility called via pglogical_ProcessUtility # during pgl SQL execution. # # This MUST not be invoked in TopMemoryContext, and so long as txn state is sane # cannot then allocate into TopMemoryContext. (Shame Valgrind doesn't let us name # mempools for allocations). { pgl_sql_processutility Memcheck:Leak match-leak-kinds: reachable,possible fun:*lloc* ... fun:standard_ProcessUtility fun:pglogical_ProcessUtility ... fun:handle_sql ... 
fun:apply_work fun:pglogical_apply_main } # # XXX uncertain # # We're a bit suss about this one # { Memcheck:Leak match-leak-kinds: possible fun:MemoryContextAlloc fun:MemoryContextStrdup fun:pglogical_relation_cache_update fun:pglogical_read_rel fun:handle_relation fun:replication_handler fun:apply_work } # # Core postgres noise. It doesn't try to be memcheck friendly. # { pg_conffile Memcheck:Leak match-leak-kinds: definite fun:malloc ... fun:guc_strdup ... fun:ProcessConfigFile } { pg_start Memcheck:Leak match-leak-kinds: possible fun:malloc fun:BackendStartup fun:ServerLoop fun:PostmasterMain fun:main } { pg_bgw Memcheck:Leak match-leak-kinds: possible fun:malloc ... fun:do_start_bgworker fun:maybe_start_bgworkers } { pg_bgw_register Memcheck:Leak match-leak-kinds: possible fun:malloc fun:RegisterBackgroundWorker fun:ApplyLauncherRegister fun:PostmasterMain fun:main } { pg_init_locks Memcheck:Leak match-leak-kinds: possible fun:MemoryContextAlloc* ... fun:InitLocks fun:CreateSharedMemoryAndSemaphores fun:reset_shared fun:PostmasterMain fun:main } { pg_bgw_state Memcheck:Leak match-leak-kinds: possible fun:malloc fun:BackgroundWorkerStateChange } { pg_ps_display Memcheck:Leak match-leak-kinds: definite fun:malloc fun:save_ps_display_args fun:main } # # Pg's hash impl looks like it confuses valgrind # (but verify TODO) # { pg_hash Memcheck:Leak match-leak-kinds: possible ... fun:element_alloc fun:get_hash_entry fun:hash_search_with_hash_value } { pg_pmmain_blah Memcheck:Leak match-leak-kinds: definite fun:malloc fun:strdup fun:PostmasterMain fun:main } { pg_tz Memcheck:Leak match-leak-kinds: possible fun:MemoryContextAlloc fun:element_alloc fun:hash_create fun:init_timezone_hashtable fun:pg_tzset fun:pg_timezone_initialize fun:InitializeGUCOptions fun:PostmasterMain fun:main } { pg_vac Memcheck:Leak match-leak-kinds: possible fun:malloc fun:StartAutovacuumWorker } { pg_InitPostgres Memcheck:Leak match-leak-kinds: possible fun:*lloc* ... 
fun:InitPostgres fun:PostgresMain } # "reachable"-only suppressions. These are used when we're looking for memory # we can still access but are progressively allocating more of during long # running processes like the apply worker our output plugin. To find them # we must exclude things we're keeping around on purpose. # # This is allocated once on entry to apply_work { pgl_reachable_msgcontext Memcheck:Leak match-leak-kinds: reachable fun:MemoryContextAlloc fun:MemoryContextCreate fun:AllocSetContextCreate fun:apply_work fun:pglogical_apply_main } # This is stashed in a static var on first allocation and reused. It # shouldn't grow. { pgl_reachable_feedback Memcheck:Leak match-leak-kinds: reachable fun:palloc ... fun:makeStringInfo fun:send_feedback fun:apply_work fun:pglogical_apply_main } # It's typical for syscache and catcache entries to live beyond a single txn. # That's kind of the point. If pgl fails to release cache entries that's not # something we can detect easily, except maybe with CLOBBER_CACHE_ALWAYS, # which runs like treacle under valgrind. { pg_reachable_catcache Memcheck:Leak match-leak-kinds: reachable,possible fun:*lloc* ... fun:SearchCatCache } # RelationGetIndexList adds indexes to the relcache. But importantly # it returns a list_copy of the result, so we must not filter out the # allocation there, only the cached stuff. { pg_reachable_indexlist Memcheck:Leak match-leak-kinds: reachable fun:MemoryContextAlloc* ... fun:index_open ... fun:RelationGetIndexList } # GUCs normally have extended lifetimes { pg_reachable_guc Memcheck:Leak match-leak-kinds: reachable fun:malloc fun:strdup fun:guc_strdup } { pg_reachable_guc_check Memcheck:Leak match-leak-kinds: reachable,possible fun:*lloc* fun:check_role ... fun:parse_and_validate_value fun:set_config_option } # Lots of things in standard_ProcessUtility mess with the syscache # and they're not going to leak. { pg_reachable_processutility Memcheck:Leak match-leak-kinds: reachable fun:*lloc* ... 
fun:relation_open ... fun:standard_ProcessUtility } # The smgr and md layer keep track of their own resources. If we fail to close # relations we should see it at a higher level. We can see these allocations # from random heap operations because smgr relation opening happens lazily, not # at heap_open time. { pg_mdopen Memcheck:Leak match-leak-kinds: reachable fun:*lloc* ... fun:mdopen } # # This report looks bogus. There should be no way for the allocated wait-event # set to escape WaitLatchOrSocket. In pgl it only arises once anyway. # { pg_waitlatchorsocket Memcheck:Leak match-leak-kinds: reachable fun:malloc fun:AllocSetAlloc fun:MemoryContextAllocZero fun:CreateWaitEventSet fun:WaitLatchOrSocket } # # If we test across a txn boundary we may see pgstat allocations, and # that's OK, we clean them later. # { pg_pgstat Memcheck:Leak match-leak-kinds: reachable fun:MemoryContextAlloc fun:get_tabstat_stack_level } # # Again, lock acquisition during heap_openrv can seem to leak if # we do a leakcheck during a txn. It's fine, pg will complain # if we fail to close relations. # { pg_heap_openrv Memcheck:Leak match-leak-kinds: reachable fun:*lloc* ... fun:heap_openrv } { pg_table_openrv Memcheck:Leak match-leak-kinds: reachable fun:*lloc* ... fun:table_openrv } # # Tupledescs are refcounted. Any check we do during a txn # will show such output. 
# #{ # pg_tupledesc_count # Memcheck:Leak # match-leak-kinds: reachable # fun:MemoryContextAlloc # fun:ResourceArrayEnlarge.part.4 # fun:IncrTupleDescRefCount # fun:ExecSetSlotDescriptor #} pglogical-REL2_4_1/pglogical_apply.c000066400000000000000000001462311415142317000175030ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_apply.c * pglogical apply logic * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "libpq-fe.h" #include "pgstat.h" #include "access/htup_details.h" #include "access/xact.h" #include "catalog/namespace.h" #include "commands/async.h" #include "commands/dbcommands.h" #include "commands/sequence.h" #include "commands/tablecmds.h" #include "commands/trigger.h" #include "executor/executor.h" #include "libpq/pqformat.h" #include "mb/pg_wchar.h" #include "nodes/makefuncs.h" #include "nodes/parsenodes.h" #include "optimizer/planner.h" #ifdef XCP #include "pgxc/pgxcnode.h" #endif #include "replication/origin.h" #include "replication/reorderbuffer.h" #include "rewrite/rewriteHandler.h" #include "storage/ipc.h" #include "storage/lmgr.h" #include "storage/proc.h" #include "tcop/pquery.h" #include "tcop/utility.h" #include "utils/builtins.h" #include "utils/int8.h" #include "utils/jsonb.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/snapmgr.h" #include "pglogical_conflict.h" #include "pglogical_executor.h" #include "pglogical_node.h" #include "pglogical_queue.h" #include "pglogical_relcache.h" #include "pglogical_repset.h" #include "pglogical_rpc.h" #include "pglogical_sync.h" #include "pglogical_worker.h" #include "pglogical_apply.h" #include "pglogical_apply_heap.h" #include "pglogical_apply_spi.h" #include "pglogical.h" void pglogical_apply_main(Datum main_arg); static bool 
in_remote_transaction = false; static XLogRecPtr remote_origin_lsn = InvalidXLogRecPtr; static RepOriginId remote_origin_id = InvalidRepOriginId; static TimeOffset apply_delay = 0; static Oid QueueRelid = InvalidOid; static List *SyncingTables = NIL; PGLogicalApplyWorker *MyApplyWorker = NULL; PGLogicalSubscription *MySubscription = NULL; static PGconn *applyconn = NULL; typedef struct PGLogicalApplyFunctions { pglogical_apply_begin_fn on_begin; pglogical_apply_commit_fn on_commit; pglogical_apply_insert_fn do_insert; pglogical_apply_update_fn do_update; pglogical_apply_delete_fn do_delete; pglogical_apply_can_mi_fn can_multi_insert; pglogical_apply_mi_add_tuple_fn multi_insert_add_tuple; pglogical_apply_mi_finish_fn multi_insert_finish; } PGLogicalApplyFunctions; static PGLogicalApplyFunctions apply_api = { .on_begin = pglogical_apply_heap_begin, .on_commit = pglogical_apply_heap_commit, .do_insert = pglogical_apply_heap_insert, .do_update = pglogical_apply_heap_update, .do_delete = pglogical_apply_heap_delete, .can_multi_insert = pglogical_apply_heap_can_mi, .multi_insert_add_tuple = pglogical_apply_heap_mi_add_tuple, .multi_insert_finish = pglogical_apply_heap_mi_finish }; /* Number of tuples inserted after which we switch to multi-insert. */ #define MIN_MULTI_INSERT_TUPLES 5 static PGLogicalRelation *last_insert_rel = NULL; static int last_insert_rel_cnt = 0; static bool use_multi_insert = false; /* * A message counter for the xact, for debugging. We don't send * the remote change LSN with messages, so this aids identification * of which change causes an error. 
*/ static uint32 xact_action_counter; typedef struct PGLFlushPosition { dlist_node node; XLogRecPtr local_end; XLogRecPtr remote_end; } PGLFlushPosition; dlist_head lsn_mapping = DLIST_STATIC_INIT(lsn_mapping); typedef struct ApplyExecState { EState *estate; EPQState epqstate; ResultRelInfo *resultRelInfo; TupleTableSlot *slot; } ApplyExecState; struct ActionErrCallbackArg { const char * action_name; PGLogicalRelation *rel; bool is_ddl_or_drop; }; struct ActionErrCallbackArg errcallback_arg; static TransactionId remote_xid; static void multi_insert_finish(void); static void handle_queued_message(HeapTuple msgtup, bool tx_just_started); static void handle_startup_param(const char *key, const char *value); static bool parse_bool_param(const char *key, const char *value); static void process_syncing_tables(XLogRecPtr end_lsn); static void start_sync_worker(Name nspname, Name relname); /* * Check if given relation is in process of being synchronized. * * TODO: performance */ static bool should_apply_changes_for_rel(const char *nspname, const char *relname) { if (list_length(SyncingTables) > 0) { ListCell *lc; foreach (lc, SyncingTables) { PGLogicalSyncStatus *sync = (PGLogicalSyncStatus *) lfirst(lc); if (namestrcmp(&sync->nspname, nspname) == 0 && namestrcmp(&sync->relname, relname) == 0 && (sync->status != SYNC_STATUS_READY && !(sync->status == SYNC_STATUS_SYNCDONE && sync->statuslsn <= replorigin_session_origin_lsn))) return false; } } return true; } /* * Prepare apply state details for errcontext or direct logging. * * This callback could be invoked at all sorts of weird times * so it should assume as little as psosible about the invoking * context. */ static void format_action_description( StringInfo si, const char * action_name, PGLogicalRelation *rel, bool is_ddl_or_drop) { appendStringInfoString(si, "apply "); appendStringInfoString(si, action_name == NULL ? 
"(unknown action)" : action_name); if (rel != NULL && rel->nspname != NULL && rel->relname != NULL && !is_ddl_or_drop) { appendStringInfo(si, " from remote relation %s.%s", rel->nspname, rel->relname); } appendStringInfo(si, " in commit before %X/%X, xid %u committed at %s (action #%u)", (uint32)(replorigin_session_origin_lsn>>32), (uint32)replorigin_session_origin_lsn, remote_xid, timestamptz_to_str(replorigin_session_origin_timestamp), xact_action_counter); if (replorigin_session_origin != InvalidRepOriginId) { appendStringInfo(si, " from node replorigin %u", replorigin_session_origin); } if (remote_origin_id != InvalidRepOriginId) { appendStringInfo(si, " forwarded from commit %X/%X on node %u", (uint32)(remote_origin_lsn>>32), (uint32)remote_origin_lsn, remote_origin_id); } } static void action_error_callback(void *arg) { StringInfoData si; initStringInfo(&si); format_action_description(&si, errcallback_arg.action_name, errcallback_arg.rel, errcallback_arg.is_ddl_or_drop); errcontext("%s", si.data); pfree(si.data); } static bool ensure_transaction(void) { if (IsTransactionState()) { if (CurrentMemoryContext != MessageContext) MemoryContextSwitchTo(MessageContext); return false; } /* * pglogical doesn't have "statements" as such, so we'll report one * statement per applied transaction. We must set the statement start time * because StartTransaction() uses it to initialize the transaction cached * timestamp used by current_timestamp. If we don't set it, every xact will * get the same current_timestamp. 
See 2ndQuadrant/pglogical_internal#148 */ SetCurrentStatementStartTimestamp(); StartTransactionCommand(); apply_api.on_begin(); MemoryContextSwitchTo(MessageContext); return true; } static void handle_begin(StringInfo s) { XLogRecPtr commit_lsn; TimestampTz commit_time; xact_action_counter = 1; errcallback_arg.action_name = "BEGIN"; pglogical_read_begin(s, &commit_lsn, &commit_time, &remote_xid); replorigin_session_origin_timestamp = commit_time; replorigin_session_origin_lsn = commit_lsn; remote_origin_id = InvalidRepOriginId; VALGRIND_PRINTF("PGLOGICAL_APPLY: begin %u\n", remote_xid); /* don't want the overhead otherwise */ if (apply_delay > 0) { TimestampTz current; current = GetCurrentIntegerTimestamp(); /* ensure no weirdness due to clock drift */ if (current > replorigin_session_origin_timestamp) { long sec; int usec; current = TimestampTzPlusMilliseconds(current, -apply_delay); TimestampDifference(current, replorigin_session_origin_timestamp, &sec, &usec); /* FIXME: deal with overflow? */ pg_usleep(usec + (sec * USECS_PER_SEC)); } } in_remote_transaction = true; pgstat_report_activity(STATE_RUNNING, NULL); } /* * Handle COMMIT message. */ static void handle_commit(StringInfo s) { XLogRecPtr commit_lsn; XLogRecPtr end_lsn; TimestampTz commit_time; errcallback_arg.action_name = "COMMIT"; xact_action_counter++; pglogical_read_commit(s, &commit_lsn, &end_lsn, &commit_time); Assert(commit_time == replorigin_session_origin_timestamp); if (IsTransactionState()) { PGLFlushPosition *flushpos; multi_insert_finish(); apply_api.on_commit(); /* We need to write end_lsn to the commit record. 
*/ replorigin_session_origin_lsn = end_lsn; CommitTransactionCommand(); MemoryContextSwitchTo(TopMemoryContext); /* Track commit lsn */ flushpos = (PGLFlushPosition *) palloc(sizeof(PGLFlushPosition)); flushpos->local_end = XactLastCommitEnd; flushpos->remote_end = end_lsn; dlist_push_tail(&lsn_mapping, &flushpos->node); MemoryContextSwitchTo(MessageContext); } /* * If the xact isn't from the immediate upstream, advance the slot of the * node it originally came from so we start replay of that node's change * data at the right place. * * This is only necessary when we're streaming data from one peer (A) that * in turn receives from other peers (B, C), and we plan to later switch to * replaying directly from B and/or C, no longer receiving forwarded xacts * from A. When we do the switchover we need to know the right place at * which to start replay from B and C. We don't actually do that yet, but * we'll want to be able to do cascaded initialisation in future, so it's * worth keeping track. * * A failure can occur here (see #79) if there's a cascading * replication configuration like: * * X--> Y -> Z * | ^ * | | * \---------/ * * where the direct and indirect connections from X to Z use different * replication sets so as not to conflict, and where Y and Z are on the * same PostgreSQL instance. In this case our attempt to advance the * replication identifier here will ERROR because it's already in use * for the direct connection from X to Z. So don't do that. */ if (remote_origin_id != InvalidRepOriginId && remote_origin_id != replorigin_session_origin) { #if PG_VERSION_NUM >= 90500 Relation replorigin_rel; #endif elog(DEBUG3, "advancing origin oid %u for forwarded row to %X/%X", remote_origin_id, (uint32)(XactLastCommitEnd>>32), (uint32)XactLastCommitEnd); #if PG_VERSION_NUM >= 90500 replorigin_rel = table_open(ReplicationOriginRelationId, RowExclusiveLock); #endif replorigin_advance(remote_origin_id, remote_origin_lsn, XactLastCommitEnd, false, false /* XXX ? 
*/); #if PG_VERSION_NUM >= 90500 table_close(replorigin_rel, RowExclusiveLock); #endif } in_remote_transaction = false; /* * Stop replay if we're doing limited replay and we've replayed up to the * last record we're supposed to process. */ if (MyApplyWorker->replay_stop_lsn != InvalidXLogRecPtr && MyApplyWorker->replay_stop_lsn <= end_lsn) { ereport(LOG, (errmsg("pglogical %s finished processing; replayed to %X/%X of required %X/%X", MyPGLogicalWorker->worker_type == PGLOGICAL_WORKER_SYNC ? "sync" : "apply", (uint32)(end_lsn>>32), (uint32)end_lsn, (uint32)(MyApplyWorker->replay_stop_lsn >>32), (uint32)MyApplyWorker->replay_stop_lsn))); /* * If this is sync worker, update syncing table state to done. */ if (MyPGLogicalWorker->worker_type == PGLOGICAL_WORKER_SYNC) { StartTransactionCommand(); set_table_sync_status(MyApplyWorker->subid, NameStr(MyPGLogicalWorker->worker.sync.nspname), NameStr(MyPGLogicalWorker->worker.sync.relname), SYNC_STATUS_SYNCDONE, end_lsn); CommitTransactionCommand(); } /* * Flush all writes so the latest position can be reported back to the * sender. */ XLogFlush(GetXLogWriteRecPtr()); /* * Disconnect. * * This needs to happen before the pglogical_sync_worker_finish() * call otherwise slot drop will fail. */ PQfinish(applyconn); /* * If this is sync worker, finish it. */ if (MyPGLogicalWorker->worker_type == PGLOGICAL_WORKER_SYNC) pglogical_sync_worker_finish(); /* Stop gracefully */ proc_exit(0); } VALGRIND_PRINTF("PGLOGICAL_APPLY: commit %u\n", remote_xid); xact_action_counter = 0; remote_xid = InvalidTransactionId; process_syncing_tables(end_lsn); /* * Ensure any pending signals/self-notifies are sent out. * * Note that there is a possibility that this will result in an ERROR, * which will result in the apply worker being killed and restarted. As * the notification queues have already been flushed, the same error won't * occur again, however if errors continue, they will dramatically slow * down - but not stop - replication. 
*/ ProcessCompletedNotifies(); pgstat_report_activity(STATE_IDLE, NULL); } /* * Handle ORIGIN message. */ static void handle_origin(StringInfo s) { char *origin; /* * ORIGIN message can only come inside remote transaction and before * any actual writes. */ if (!in_remote_transaction || IsTransactionState()) elog(ERROR, "ORIGIN message sent out of order"); /* We have to start transaction here so that we can work with origins. */ ensure_transaction(); origin = pglogical_read_origin(s, &remote_origin_lsn); remote_origin_id = replorigin_by_name(origin, true); } /* * Handle RELATION message. * * Note we don't do validation against local schema here. The validation is * posponed until first change for given relation comes. */ static void handle_relation(StringInfo s) { multi_insert_finish(); (void) pglogical_read_rel(s); } static void handle_insert(StringInfo s) { PGLogicalTupleData newtup; PGLogicalRelation *rel; bool started_tx = ensure_transaction(); PushActiveSnapshot(GetTransactionSnapshot()); errcallback_arg.action_name = "INSERT"; xact_action_counter++; rel = pglogical_read_insert(s, RowExclusiveLock, &newtup); errcallback_arg.rel = rel; /* If in list of relations which are being synchronized, skip. */ if (!should_apply_changes_for_rel(rel->nspname, rel->relname)) { pglogical_relation_close(rel, NoLock); PopActiveSnapshot(); CommandCounterIncrement(); return; } /* Handle multi_insert capabilities. */ if (use_multi_insert) { if (rel != last_insert_rel) { multi_insert_finish(); /* Fall through to normal insert. */ } else { apply_api.multi_insert_add_tuple(rel, &newtup); last_insert_rel_cnt++; return; } } else if (pglogical_batch_inserts && RelationGetRelid(rel->rel) != QueueRelid && apply_api.can_multi_insert && apply_api.can_multi_insert(rel)) { if (rel != last_insert_rel) { last_insert_rel = rel; last_insert_rel_cnt = 0; } else if (last_insert_rel_cnt++ >= MIN_MULTI_INSERT_TUPLES) { use_multi_insert = true; last_insert_rel_cnt = 0; } } /* Normal insert. 
*/ apply_api.do_insert(rel, &newtup); /* if INSERT was into our queue, process the message. */ if (RelationGetRelid(rel->rel) == QueueRelid) { HeapTuple ht; LockRelId lockid = rel->rel->rd_lockInfo.lockRelId; Relation qrel; multi_insert_finish(); MemoryContextSwitchTo(MessageContext); ht = heap_form_tuple(RelationGetDescr(rel->rel), newtup.values, newtup.nulls); LockRelationIdForSession(&lockid, RowExclusiveLock); pglogical_relation_close(rel, NoLock); PopActiveSnapshot(); CommandCounterIncrement(); apply_api.on_commit(); handle_queued_message(ht, started_tx); heap_freetuple(ht); qrel = table_open(QueueRelid, RowExclusiveLock); UnlockRelationIdForSession(&lockid, RowExclusiveLock); table_close(qrel, NoLock); apply_api.on_begin(); MemoryContextSwitchTo(MessageContext); // if (oldxid != GetTopTransactionId()) // CommitTransactionCommand(); } else { pglogical_relation_close(rel, NoLock); PopActiveSnapshot(); CommandCounterIncrement(); } } static void multi_insert_finish(void) { if (use_multi_insert && last_insert_rel_cnt) { const char *old_action = errcallback_arg.action_name; PGLogicalRelation *old_rel = errcallback_arg.rel; errcallback_arg.action_name = "multi INSERT"; errcallback_arg.rel = last_insert_rel; apply_api.multi_insert_finish(last_insert_rel); pglogical_relation_close(last_insert_rel, NoLock); use_multi_insert = false; last_insert_rel = NULL; last_insert_rel_cnt = 0; errcallback_arg.rel = old_rel; errcallback_arg.action_name = old_action; } } static void handle_update(StringInfo s) { PGLogicalTupleData oldtup; PGLogicalTupleData newtup; PGLogicalRelation *rel; bool hasoldtup; errcallback_arg.action_name = "UPDATE"; xact_action_counter++; ensure_transaction(); multi_insert_finish(); PushActiveSnapshot(GetTransactionSnapshot()); rel = pglogical_read_update(s, RowExclusiveLock, &hasoldtup, &oldtup, &newtup); errcallback_arg.rel = rel; /* If in list of relations which are being synchronized, skip. 
*/ if (!should_apply_changes_for_rel(rel->nspname, rel->relname)) { pglogical_relation_close(rel, NoLock); PopActiveSnapshot(); CommandCounterIncrement(); return; } apply_api.do_update(rel, hasoldtup ? &oldtup : &newtup, &newtup); pglogical_relation_close(rel, NoLock); PopActiveSnapshot(); CommandCounterIncrement(); } static void handle_delete(StringInfo s) { PGLogicalTupleData oldtup; PGLogicalRelation *rel; memset(&errcallback_arg, 0, sizeof(struct ActionErrCallbackArg)); xact_action_counter++; ensure_transaction(); multi_insert_finish(); PushActiveSnapshot(GetTransactionSnapshot()); rel = pglogical_read_delete(s, RowExclusiveLock, &oldtup); errcallback_arg.rel = rel; /* If in list of relations which are being synchronized, skip. */ if (!should_apply_changes_for_rel(rel->nspname, rel->relname)) { pglogical_relation_close(rel, NoLock); PopActiveSnapshot(); CommandCounterIncrement(); return; } apply_api.do_delete(rel, &oldtup); pglogical_relation_close(rel, NoLock); PopActiveSnapshot(); CommandCounterIncrement(); } inline static bool getmsgisend(StringInfo msg) { return msg->cursor == msg->len; } static void handle_startup(StringInfo s) { uint8 msgver = pq_getmsgbyte(s); if (msgver != 1) elog(ERROR, "Expected startup message version 1, but got %u", msgver); /* * The startup message consists of null-terminated strings as key/value * pairs. The first entry is always the format identifier. 
*/ do { const char *k, *v; k = pq_getmsgstring(s); if (strlen(k) == 0) ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid startup message: key has zero length"))); if (getmsgisend(s)) ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid startup message: key '%s' has no following value", k))); /* It's OK to have a zero length value */ v = pq_getmsgstring(s); handle_startup_param(k, v); } while (!getmsgisend(s)); } static bool parse_bool_param(const char *key, const char *value) { bool result; if (!parse_bool(value, &result)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("couldn't parse value '%s' for key '%s' as boolean", value, key))); return result; } static void handle_startup_param(const char *key, const char *value) { elog(DEBUG2, "apply got pglogical startup msg param %s=%s", key, value); if (strcmp(key, "pg_version") == 0) elog(DEBUG1, "upstream Pg version is %s", value); if (strcmp(key, "encoding") == 0) { int encoding = pg_char_to_encoding(value); if (encoding != GetDatabaseEncoding()) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("expected encoding=%s from upstream but got %s", GetDatabaseEncodingName(), value))); } if (strcmp(key, "forward_changeset_origins") == 0) { bool fwd = parse_bool_param(key, value); /* FIXME: Store this somewhere */ elog(DEBUG1, "changeset origin forwarding enabled: %s", fwd ? "t" : "f"); } /* * We just ignore a bunch of parameters here because we specify what we * require when we send our params to the upstream. It's required to ERROR * if it can't match what we asked for. It may send the startup message * first, but it'll be followed by an ERROR if it does. There's no need * to check params we can't do anything about mismatches of, like protocol * versions and type sizes. 
*/ } static RangeVar * parse_relation_message(Jsonb *message) { JsonbIterator *it; JsonbValue v; int r; int level = 0; char *key = NULL; char **parse_res = NULL; char *nspname = NULL; char *relname = NULL; /* Parse and validate the json message. */ if (!JB_ROOT_IS_OBJECT(message)) elog(ERROR, "malformed message in queued message tuple: root is not object"); it = JsonbIteratorInit(&message->root); while ((r = JsonbIteratorNext(&it, &v, false)) != WJB_DONE) { if (level == 0 && r != WJB_BEGIN_OBJECT) elog(ERROR, "root element needs to be an object"); else if (level == 0 && r == WJB_BEGIN_OBJECT) { level++; } else if (level == 1 && r == WJB_KEY) { if (strncmp(v.val.string.val, "schema_name", v.val.string.len) == 0) parse_res = &nspname; else if (strncmp(v.val.string.val, "table_name", v.val.string.len) == 0) parse_res = &relname; else elog(ERROR, "unexpected key: %s", pnstrdup(v.val.string.val, v.val.string.len)); key = v.val.string.val; } else if (level == 1 && r == WJB_VALUE) { if (!key) elog(ERROR, "in wrong state when parsing key"); if (v.type != jbvString) elog(ERROR, "unexpected type for key '%s': %u", key, v.type); *parse_res = pnstrdup(v.val.string.val, v.val.string.len); } else if (level == 1 && r != WJB_END_OBJECT) { elog(ERROR, "unexpected content: %u at level %d", r, level); } else if (r == WJB_END_OBJECT) { level--; parse_res = NULL; key = NULL; } else elog(ERROR, "unexpected content: %u at level %d", r, level); } /* Check if we got both schema and table names. */ if (!nspname) elog(ERROR, "missing schema_name in relation message"); if (!relname) elog(ERROR, "missing table_name in relation message"); return makeRangeVar(nspname, relname, -1); } /* * Handle TRUNCATE message comming via queue table. */ static void handle_truncate(QueuedMessage *queued_message) { RangeVar *rv; /* * If table doesn't exist locally, it can't be subscribed. * * TODO: should we error here? 
*/ rv = parse_relation_message(queued_message->message); /* If in list of relations which are being synchronized, skip. */ if (!should_apply_changes_for_rel(rv->schemaname, rv->relname)) return; truncate_table(rv->schemaname, rv->relname); } /* * Handle TABLESYNC message comming via queue table. */ static void handle_table_sync(QueuedMessage *queued_message) { RangeVar *rv; MemoryContext oldcontext; PGLogicalSyncStatus *oldsync; PGLogicalSyncStatus *newsync; rv = parse_relation_message(queued_message->message); oldsync = get_table_sync_status(MyApplyWorker->subid, rv->schemaname, rv->relname, true); if (oldsync) { elog(INFO, "table sync came from queue for table %s.%s which already being synchronized, skipping", rv->schemaname, rv->relname); return; } /* Keep the lists persistent. */ oldcontext = MemoryContextSwitchTo(TopMemoryContext); newsync = palloc0(sizeof(PGLogicalSyncStatus)); MemoryContextSwitchTo(oldcontext); newsync->kind = SYNC_KIND_DATA; newsync->subid = MyApplyWorker->subid; newsync->status = SYNC_STATUS_INIT; namestrcpy(&newsync->nspname, rv->schemaname); namestrcpy(&newsync->relname, rv->relname); create_local_sync_status(newsync); oldcontext = MemoryContextSwitchTo(TopMemoryContext); MemoryContextSwitchTo(oldcontext); MyApplyWorker->sync_pending = true; } /* * Handle SEQUENCE message comming via queue table. */ static void handle_sequence(QueuedMessage *queued_message) { Jsonb *message = queued_message->message; JsonbIterator *it; JsonbValue v; int r; int level = 0; char *key = NULL; char **parse_res = NULL; char *nspname = NULL; char *relname = NULL; char *last_value_raw = NULL; int64 last_value; Oid nspoid; Oid reloid; /* Parse and validate the json message. 
*/ if (!JB_ROOT_IS_OBJECT(message)) elog(ERROR, "malformed message in queued message tuple: root is not object"); it = JsonbIteratorInit(&message->root); while ((r = JsonbIteratorNext(&it, &v, false)) != WJB_DONE) { if (level == 0 && r != WJB_BEGIN_OBJECT) elog(ERROR, "root element needs to be an object"); else if (level == 0 && r == WJB_BEGIN_OBJECT) { level++; } else if (level == 1 && r == WJB_KEY) { if (strncmp(v.val.string.val, "schema_name", v.val.string.len) == 0) parse_res = &nspname; else if (strncmp(v.val.string.val, "sequence_name", v.val.string.len) == 0) parse_res = &relname; else if (strncmp(v.val.string.val, "last_value", v.val.string.len) == 0) parse_res = &last_value_raw; else elog(ERROR, "unexpected key: %s", pnstrdup(v.val.string.val, v.val.string.len)); key = v.val.string.val; } else if (level == 1 && r == WJB_VALUE) { if (!key) elog(ERROR, "in wrong state when parsing key"); if (v.type != jbvString) elog(ERROR, "unexpected type for key '%s': %u", key, v.type); *parse_res = pnstrdup(v.val.string.val, v.val.string.len); } else if (level == 1 && r != WJB_END_OBJECT) { elog(ERROR, "unexpected content: %u at level %d", r, level); } else if (r == WJB_END_OBJECT) { level--; parse_res = NULL; key = NULL; } else elog(ERROR, "unexpected content: %u at level %d", r, level); } /* Check if we got both schema and table names. */ if (!nspname) elog(ERROR, "missing schema_name in sequence message"); if (!relname) elog(ERROR, "missing table_name in sequence message"); if (!last_value_raw) elog(ERROR, "missing last_value in sequence message"); nspoid = get_namespace_oid(nspname, false); reloid = get_relname_relid(relname, nspoid); scanint8(last_value_raw, false, &last_value); DirectFunctionCall2(setval_oid, ObjectIdGetDatum(reloid), Int64GetDatum(last_value)); } /* * Handle SQL message comming via queue table. 
*/ static void handle_sql(QueuedMessage *queued_message, bool tx_just_started) { JsonbIterator *it; JsonbValue v; int r; char *sql; /* Validate the json and extract the SQL string from it. */ if (!JB_ROOT_IS_SCALAR(queued_message->message)) elog(ERROR, "malformed message in queued message tuple: root is not scalar"); it = JsonbIteratorInit(&queued_message->message->root); r = JsonbIteratorNext(&it, &v, false); if (r != WJB_BEGIN_ARRAY) elog(ERROR, "malformed message in queued message tuple, item type %d expected %d", r, WJB_BEGIN_ARRAY); r = JsonbIteratorNext(&it, &v, false); if (r != WJB_ELEM) elog(ERROR, "malformed message in queued message tuple, item type %d expected %d", r, WJB_ELEM); if (v.type != jbvString) elog(ERROR, "malformed message in queued message tuple, expected value type %d got %d", jbvString, v.type); sql = pnstrdup(v.val.string.val, v.val.string.len); r = JsonbIteratorNext(&it, &v, false); if (r != WJB_END_ARRAY) elog(ERROR, "malformed message in queued message tuple, item type %d expected %d", r, WJB_END_ARRAY); r = JsonbIteratorNext(&it, &v, false); if (r != WJB_DONE) elog(ERROR, "malformed message in queued message tuple, item type %d expected %d", r, WJB_DONE); /* Run the extracted SQL. */ pglogical_execute_sql_command(sql, queued_message->role, tx_just_started); } /* * Handles messages comming from the queue. 
*/ static void handle_queued_message(HeapTuple msgtup, bool tx_just_started) { QueuedMessage *queued_message; const char *old_action_name; old_action_name = errcallback_arg.action_name; errcallback_arg.is_ddl_or_drop = true; queued_message = queued_message_from_tuple(msgtup); switch (queued_message->message_type) { case QUEUE_COMMAND_TYPE_SQL: errcallback_arg.action_name = "QUEUED_SQL"; handle_sql(queued_message, tx_just_started); break; case QUEUE_COMMAND_TYPE_TRUNCATE: errcallback_arg.action_name = "QUEUED_TRUNCATE"; handle_truncate(queued_message); break; case QUEUE_COMMAND_TYPE_TABLESYNC: errcallback_arg.action_name = "QUEUED_TABLESYNC"; handle_table_sync(queued_message); break; case QUEUE_COMMAND_TYPE_SEQUENCE: errcallback_arg.action_name = "QUEUED_SEQUENCE"; handle_sequence(queued_message); break; default: elog(ERROR, "unknown message type '%c'", queued_message->message_type); } errcallback_arg.action_name = old_action_name; errcallback_arg.is_ddl_or_drop = false; } static void replication_handler(StringInfo s) { ErrorContextCallback errcallback; char action = pq_getmsgbyte(s); memset(&errcallback_arg, 0, sizeof(struct ActionErrCallbackArg)); errcallback.callback = action_error_callback; errcallback.arg = &errcallback_arg; errcallback.previous = error_context_stack; error_context_stack = &errcallback; Assert(CurrentMemoryContext == MessageContext); switch (action) { /* BEGIN */ case 'B': handle_begin(s); break; /* COMMIT */ case 'C': handle_commit(s); break; /* ORIGIN */ case 'O': handle_origin(s); break; /* RELATION */ case 'R': handle_relation(s); break; /* INSERT */ case 'I': handle_insert(s); break; /* UPDATE */ case 'U': handle_update(s); break; /* DELETE */ case 'D': handle_delete(s); break; /* STARTUP MESSAGE */ case 'S': handle_startup(s); break; default: elog(ERROR, "unknown action of type %c", action); } Assert(CurrentMemoryContext == MessageContext); if (error_context_stack == &errcallback) error_context_stack = errcallback.previous; if (action == 
'C') { /* * We clobber MessageContext on commit. It doesn't matter much when we * do it so long as we do so periodically, to prevent the context from * growing too much. We might want to clean it up even 'n'th message * too, but that adds testing burden and isn't done for now. */ MemoryContextReset(MessageContext); } } /* * Figure out which write/flush positions to report to the walsender process. * * We can't simply report back the last LSN the walsender sent us because the * local transaction might not yet be flushed to disk locally. Instead we * build a list that associates local with remote LSNs for every commit. When * reporting back the flush position to the sender we iterate that list and * check which entries on it are already locally flushed. Those we can report * as having been flushed. * * Returns true if there's no outstanding transactions that need to be * flushed. */ static bool get_flush_position(XLogRecPtr *write, XLogRecPtr *flush) { dlist_mutable_iter iter; XLogRecPtr local_flush = GetFlushRecPtr(); *write = InvalidXLogRecPtr; *flush = InvalidXLogRecPtr; dlist_foreach_modify(iter, &lsn_mapping) { PGLFlushPosition *pos = dlist_container(PGLFlushPosition, node, iter.cur); *write = pos->remote_end; if (pos->local_end <= local_flush) { *flush = pos->remote_end; dlist_delete(iter.cur); pfree(pos); } else { /* * Don't want to uselessly iterate over the rest of the list which * could potentially be long. Instead get the last element and * grab the write position from there. */ pos = dlist_tail_element(PGLFlushPosition, node, &lsn_mapping); *write = pos->remote_end; return false; } } return dlist_is_empty(&lsn_mapping); } /* * Send a Standby Status Update message to server. * * 'recvpos' is the latest LSN we've received data to, force is set if we need * to send a response to avoid timeouts. 
*/ static bool send_feedback(PGconn *conn, XLogRecPtr recvpos, int64 now, bool force) { static StringInfo reply_message = NULL; static XLogRecPtr last_recvpos = InvalidXLogRecPtr; static XLogRecPtr last_writepos = InvalidXLogRecPtr; static XLogRecPtr last_flushpos = InvalidXLogRecPtr; XLogRecPtr writepos; XLogRecPtr flushpos; /* It's legal to not pass a recvpos */ if (recvpos < last_recvpos) recvpos = last_recvpos; if (get_flush_position(&writepos, &flushpos)) { /* * No outstanding transactions to flush, we can report the latest * received position. This is important for synchronous replication. */ flushpos = writepos = recvpos; } if (writepos < last_writepos) writepos = last_writepos; if (flushpos < last_flushpos) flushpos = last_flushpos; /* if we've already reported everything we're good */ if (!force && writepos == last_writepos && flushpos == last_flushpos) return true; if (!reply_message) { MemoryContext oldcontext = MemoryContextSwitchTo(TopMemoryContext); reply_message = makeStringInfo(); MemoryContextSwitchTo(oldcontext); } else resetStringInfo(reply_message); pq_sendbyte(reply_message, 'r'); pq_sendint64(reply_message, recvpos); /* write */ pq_sendint64(reply_message, flushpos); /* flush */ pq_sendint64(reply_message, writepos); /* apply */ pq_sendint64(reply_message, now); /* sendTime */ pq_sendbyte(reply_message, false); /* replyRequested */ elog(DEBUG2, "sending feedback (force %d) to recv %X/%X, write %X/%X, flush %X/%X", force, (uint32) (recvpos >> 32), (uint32) recvpos, (uint32) (writepos >> 32), (uint32) writepos, (uint32) (flushpos >> 32), (uint32) flushpos ); if (PQputCopyData(conn, reply_message->data, reply_message->len) <= 0 || PQflush(conn)) { ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), errmsg("could not send feedback packet: %s", PQerrorMessage(conn)))); return false; } if (recvpos > last_recvpos) last_recvpos = recvpos; if (writepos > last_writepos) last_writepos = writepos; if (flushpos > last_flushpos) last_flushpos = flushpos; 
return true; } /* * Apply main loop. */ void apply_work(PGconn *streamConn) { int fd; char *copybuf = NULL; XLogRecPtr last_received = InvalidXLogRecPtr; applyconn = streamConn; fd = PQsocket(applyconn); /* Init the MessageContext which we use for easier cleanup. */ MessageContext = AllocSetContextCreate(TopMemoryContext, "MessageContext", ALLOCSET_DEFAULT_SIZES); MemoryContextSwitchTo(MessageContext); /* mark as idle, before starting to loop */ pgstat_report_activity(STATE_IDLE, NULL); Assert(CurrentMemoryContext == MessageContext); while (!got_SIGTERM) { int rc; int r; /* * Background workers mustn't call usleep() or any direct equivalent: * instead, they may wait on their process latch, which sleeps as * necessary, but is awakened if postmaster dies. That way the * background process goes away immediately in an emergency. */ rc = WaitLatchOrSocket(&MyProc->procLatch, WL_SOCKET_READABLE | WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, fd, 1000L); ResetLatch(&MyProc->procLatch); Assert(CurrentMemoryContext == MessageContext); /* emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) proc_exit(1); if (rc & WL_SOCKET_READABLE) PQconsumeInput(applyconn); if (PQstatus(applyconn) == CONNECTION_BAD) { elog(ERROR, "connection to other side has died"); } Assert(CurrentMemoryContext == MessageContext); for (;;) { if (got_SIGTERM) break; /* We must not have fallen out of MessageContext by accident */ Assert(CurrentMemoryContext == MessageContext); Assert(copybuf == NULL); r = PQgetCopyData(applyconn, ©buf, 1); if (r == -1) { elog(ERROR, "data stream ended"); } else if (r == -2) { elog(ERROR, "could not read COPY data: %s", PQerrorMessage(applyconn)); } else if (r < 0) elog(ERROR, "invalid COPY status %d", r); else if (r == 0) { /* need to wait for new data */ break; } else { int c; StringInfoData s; /* * We're using a StringInfo to wrap existing data here, as a * cursor. We init it manually to avoid a redundant allocation. 
*/ memset(&s, 0, sizeof(StringInfoData)); s.data = copybuf; s.len = r; s.maxlen = -1; s.cursor = 0; c = pq_getmsgbyte(&s); if (c == 'w') { XLogRecPtr start_lsn; XLogRecPtr end_lsn; start_lsn = pq_getmsgint64(&s); end_lsn = pq_getmsgint64(&s); pq_getmsgint64(&s); /* sendTime */ if (last_received < start_lsn) last_received = start_lsn; if (last_received < end_lsn) last_received = end_lsn; replication_handler(&s); } else if (c == 'k') { XLogRecPtr endpos; bool reply_requested; endpos = pq_getmsgint64(&s); /* timestamp = */ pq_getmsgint64(&s); reply_requested = pq_getmsgbyte(&s); send_feedback(applyconn, endpos, GetCurrentTimestamp(), reply_requested); if (last_received < endpos) last_received = endpos; } /* other message types are purposefully ignored */ /* copybuf is malloc'd not palloc'd */ if (copybuf != NULL) { PQfreemem(copybuf); copybuf = NULL; } } /* We must not have fallen out of MessageContext by accident */ Assert(CurrentMemoryContext == MessageContext); } /* confirm all writes at once */ send_feedback(applyconn, last_received, GetCurrentTimestamp(), false); if (!in_remote_transaction) process_syncing_tables(last_received); /* We must not have switched out of MessageContext by mistake */ Assert(CurrentMemoryContext == MessageContext); /* Cleanup the memory. */ MemoryContextResetAndDeleteChildren(MessageContext); /* * Only do a leak check if we're between txns; we don't want lots of * noise due to resources that only exist in a txn. */ if (!IsTransactionState()) { VALGRIND_DO_ADDED_LEAK_CHECK; } } } /* * Add context to the errors produced by pglogical_execute_sql_command(). */ static void execute_sql_command_error_cb(void *arg) { errcontext("during execution of queued SQL statement: %s", (char *) arg); } /* * Execute an SQL command. This can be multiple multiple queries. 
*/ void pglogical_execute_sql_command(char *cmdstr, char *role, bool isTopLevel) { const char *save_debug_query_string = debug_query_string; List *commands; ListCell *command_i; #ifdef PGXC List *commandSourceQueries; ListCell *commandSourceQuery_i; #endif MemoryContext oldcontext; ErrorContextCallback errcallback; oldcontext = MemoryContextSwitchTo(MessageContext); errcallback.callback = execute_sql_command_error_cb; errcallback.arg = cmdstr; errcallback.previous = error_context_stack; error_context_stack = &errcallback; debug_query_string = cmdstr; /* * XL distributes individual statements using just executing them as plain * SQL query and can't handle multistatements this way so we need to get * individual statements using API provided by XL itself. */ #ifdef PGXC commands = pg_parse_query_get_source(cmdstr, &commandSourceQueries); #else commands = pg_parse_query(cmdstr); #endif MemoryContextSwitchTo(oldcontext); /* * Do a limited amount of safety checking against CONCURRENTLY commands * executed in situations where they aren't allowed. The sender side should * provide protection, but better be safe than sorry. */ isTopLevel = isTopLevel && (list_length(commands) == 1); #ifdef PGXC forboth(command_i, commands, commandSourceQuery_i, commandSourceQueries) #else foreach(command_i, commands) #endif { List *plantree_list; List *querytree_list; RawStmt *command = (RawStmt *) lfirst(command_i); CommandTag commandTag; Portal portal; int save_nestlevel; DestReceiver *receiver; #ifdef PGXC cmdstr = (char *) lfirst(commandSourceQuery_i); errcallback.arg = cmdstr; #endif /* temporarily push snapshot for parse analysis/planning */ PushActiveSnapshot(GetTransactionSnapshot()); oldcontext = MemoryContextSwitchTo(MessageContext); /* * Set the current role to the user that executed the command on the * origin server. 
*/ save_nestlevel = NewGUCNestLevel(); SetConfigOption("role", role, PGC_INTERNAL, PGC_S_OVERRIDE); commandTag = CreateCommandTag(command); querytree_list = pg_analyze_and_rewrite( command, cmdstr, NULL, 0); plantree_list = pg_plan_queries( querytree_list, cmdstr, 0, NULL); PopActiveSnapshot(); portal = CreatePortal("pglogical", true, true); PortalDefineQuery(portal, NULL, cmdstr, commandTag, plantree_list, NULL); PortalStart(portal, NULL, 0, InvalidSnapshot); receiver = CreateDestReceiver(DestNone); (void) PortalRun(portal, FETCH_ALL, isTopLevel, receiver, receiver, NULL); (*receiver->rDestroy) (receiver); PortalDrop(portal, false); CommandCounterIncrement(); /* * Restore the GUC variables we set above. */ AtEOXact_GUC(true, save_nestlevel); MemoryContextSwitchTo(oldcontext); } /* protect against stack resets during CONCURRENTLY processing */ if (error_context_stack == &errcallback) error_context_stack = errcallback.previous; debug_query_string = save_debug_query_string; } /* * Load list of tables currently pending sync. * * Must be inside transaction. */ static void reread_unsynced_tables(Oid subid) { MemoryContext saved_ctx; List *unsynced_tables; ListCell *lc; /* Cleanup first. */ list_free_deep(SyncingTables); SyncingTables = NIL; /* Read new state. */ unsynced_tables = get_unsynced_tables(subid); saved_ctx = MemoryContextSwitchTo(TopMemoryContext); foreach (lc, unsynced_tables) { PGLogicalSyncStatus *sync = palloc(sizeof(PGLogicalSyncStatus)); memcpy(sync, lfirst(lc), sizeof(PGLogicalSyncStatus)); SyncingTables = lappend(SyncingTables, sync); } MemoryContextSwitchTo(saved_ctx); } static void process_syncing_tables(XLogRecPtr end_lsn) { ListCell *lc; Assert(CurrentMemoryContext == MessageContext); Assert(!IsTransactionState()); /* First check if we need to update the cached information. 
*/ if (MyApplyWorker->sync_pending) { StartTransactionCommand(); MyApplyWorker->sync_pending = false; reread_unsynced_tables(MyApplyWorker->subid); CommitTransactionCommand(); MemoryContextSwitchTo(MessageContext); } /* Process currently pending sync tables. */ if (list_length(SyncingTables) > 0) { #if PG_VERSION_NUM < 130000 ListCell *prev = NULL; ListCell *next; #endif #if PG_VERSION_NUM >= 130000 foreach(lc, SyncingTables) #else for (lc = list_head(SyncingTables); lc; lc = next) #endif { PGLogicalSyncStatus *sync = (PGLogicalSyncStatus *) lfirst(lc); PGLogicalSyncStatus *newsync; #if PG_VERSION_NUM < 130000 /* We might delete the cell so advance it now. */ next = lnext(lc); #endif StartTransactionCommand(); newsync = get_table_sync_status(MyApplyWorker->subid, NameStr(sync->nspname), NameStr(sync->relname), true); /* * TODO: what to do here? We don't really want to die, * but this can mean many things, for now we just assume table is * not relevant for us anymore and leave fixing to the user. * * The reason why this part happens in transaction is that the * memory allocated for sync info will get automatically cleaned * afterwards. 
*/ if (!newsync) { sync->status = SYNC_STATUS_READY; sync->statuslsn = InvalidXLogRecPtr; } else memcpy(sync, newsync, sizeof(PGLogicalSyncStatus)); CommitTransactionCommand(); MemoryContextSwitchTo(MessageContext); if (sync->status == SYNC_STATUS_SYNCWAIT) { PGLogicalWorker *worker; LWLockAcquire(PGLogicalCtx->lock, LW_EXCLUSIVE); worker = pglogical_sync_find(MyDatabaseId, MyApplyWorker->subid, NameStr(sync->nspname), NameStr(sync->relname)); if (pglogical_worker_running(worker) && end_lsn >= worker->worker.apply.replay_stop_lsn) { worker->worker.apply.replay_stop_lsn = end_lsn; sync->status = SYNC_STATUS_CATCHUP; StartTransactionCommand(); set_table_sync_status(MyApplyWorker->subid, NameStr(sync->nspname), NameStr(sync->relname), sync->status, sync->statuslsn); CommitTransactionCommand(); MemoryContextSwitchTo(MessageContext); if (pglogical_worker_running(worker)) SetLatch(&worker->proc->procLatch); LWLockRelease(PGLogicalCtx->lock); if (wait_for_sync_status_change(MyApplyWorker->subid, NameStr(sync->nspname), NameStr(sync->relname), SYNC_STATUS_SYNCDONE, &sync->statuslsn)) sync->status = SYNC_STATUS_SYNCDONE; } else LWLockRelease(PGLogicalCtx->lock); } if (sync->status == SYNC_STATUS_SYNCDONE && end_lsn >= sync->statuslsn) { sync->status = SYNC_STATUS_READY; sync->statuslsn = end_lsn; StartTransactionCommand(); set_table_sync_status(MyApplyWorker->subid, NameStr(sync->nspname), NameStr(sync->relname), sync->status, sync->statuslsn); CommitTransactionCommand(); MemoryContextSwitchTo(MessageContext); } /* Ready? Remove it from local cache. */ if (sync->status == SYNC_STATUS_READY) { #if PG_VERSION_NUM >= 130000 SyncingTables = foreach_delete_current(SyncingTables, lc); #else SyncingTables = list_delete_cell(SyncingTables, lc, prev); #endif pfree(sync); } else { #if PG_VERSION_NUM < 130000 prev = lc; #endif } } } /* * If there are still pending tables for synchronization, launch the sync * worker. 
*/ foreach (lc, SyncingTables) { List *workers; ListCell *wlc; int nworkers = 0; PGLogicalSyncStatus *sync = (PGLogicalSyncStatus *) lfirst(lc); if (sync->status == SYNC_STATUS_SYNCDONE || sync->status == SYNC_STATUS_READY) continue; LWLockAcquire(PGLogicalCtx->lock, LW_EXCLUSIVE); workers = pglogical_sync_find_all(MyDatabaseId, MyApplyWorker->subid); foreach (wlc, workers) { PGLogicalWorker *worker = (PGLogicalWorker *) lfirst(wlc); if (pglogical_worker_running(worker)) nworkers++; } LWLockRelease(PGLogicalCtx->lock); if (nworkers < 1) { start_sync_worker(&sync->nspname, &sync->relname); break; } } Assert(CurrentMemoryContext == MessageContext); } static void start_sync_worker(Name nspname, Name relname) { PGLogicalWorker worker; /* Start the sync worker. */ memset(&worker, 0, sizeof(PGLogicalWorker)); worker.worker_type = PGLOGICAL_WORKER_SYNC; worker.dboid = MyPGLogicalWorker->dboid; worker.worker.apply.subid = MyApplyWorker->subid; worker.worker.apply.sync_pending = false; /* Makes no sense for sync worker. */ /* Tell the worker to stop at current position. */ worker.worker.sync.apply.replay_stop_lsn = replorigin_session_origin_lsn; memcpy(&worker.worker.sync.nspname, nspname, sizeof(NameData)); memcpy(&worker.worker.sync.relname, relname, sizeof(NameData)); (void) pglogical_worker_register(&worker); } static inline TimeOffset interval_to_timeoffset(const Interval *interval) { TimeOffset span; span = interval->time; #ifdef HAVE_INT64_TIMESTAMP span += interval->month * INT64CONST(30) * USECS_PER_DAY; span += interval->day * INT64CONST(24) * USECS_PER_HOUR; #else span += interval->month * ((double) DAYS_PER_MONTH * SECS_PER_DAY); span += interval->day * ((double) HOURS_PER_DAY * SECS_PER_HOUR); #endif return span; } void pglogical_apply_main(Datum main_arg) { int slot = DatumGetInt32(main_arg); PGconn *streamConn; RepOriginId originid; XLogRecPtr origin_startpos; MemoryContext saved_ctx; char *repsets; char *origins; /* Setup shmem. 
*/ pglogical_worker_attach(slot, PGLOGICAL_WORKER_APPLY); Assert(MyPGLogicalWorker->worker_type == PGLOGICAL_WORKER_APPLY); MyApplyWorker = &MyPGLogicalWorker->worker.apply; /* Establish signal handlers. */ pqsignal(SIGTERM, handle_sigterm); /* Attach to dsm segment. */ Assert(CurrentResourceOwner == NULL); CurrentResourceOwner = ResourceOwnerCreate(NULL, "pglogical apply"); /* Load correct apply API. */ if (pglogical_use_spi) { if (pglogical_conflict_resolver != PGLOGICAL_RESOLVE_ERROR) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("pglogical.use_spi can only be used when " "pglogical.conflict_resolution is set to 'error'"))); apply_api.on_begin = pglogical_apply_spi_begin; apply_api.on_commit = pglogical_apply_spi_commit; apply_api.do_insert = pglogical_apply_spi_insert; apply_api.do_update = pglogical_apply_spi_update; apply_api.do_delete = pglogical_apply_spi_delete; apply_api.can_multi_insert = pglogical_apply_spi_can_mi; apply_api.multi_insert_add_tuple = pglogical_apply_spi_mi_add_tuple; apply_api.multi_insert_finish = pglogical_apply_spi_mi_finish; } /* Setup synchronous commit according to the user's wishes */ SetConfigOption("synchronous_commit", pglogical_synchronous_commit ? "local" : "off", PGC_BACKEND, PGC_S_OVERRIDE); /* other context? */ /* Run as replica session replication role. */ SetConfigOption("session_replication_role", "replica", PGC_SUSET, PGC_S_OVERRIDE); /* other context? */ /* * Disable function body checks during replay. That's necessary because a) * the creator of the function might have had it disabled b) the function * might be search_path dependant and we don't fix the contents of * functions. */ SetConfigOption("check_function_bodies", "off", PGC_INTERNAL, PGC_S_OVERRIDE); /* Load the subscription. 
*/ StartTransactionCommand(); saved_ctx = MemoryContextSwitchTo(TopMemoryContext); MySubscription = get_subscription(MyApplyWorker->subid); MemoryContextSwitchTo(saved_ctx); #ifdef XCP /* * When runnin under XL, initialise the XL executor so that the datanode * and coordinator information is initialised properly. */ InitMultinodeExecutor(false); #endif CommitTransactionCommand(); elog(LOG, "starting apply for subscription %s", MySubscription->name); /* Set apply delay if any. */ if (MySubscription->apply_delay) apply_delay = interval_to_timeoffset(MySubscription->apply_delay) / 1000; /* If the subscription isn't initialized yet, initialize it. */ pglogical_sync_subscription(MySubscription); elog(DEBUG1, "connecting to provider %s, dsn %s", MySubscription->origin->name, MySubscription->origin_if->dsn); /* * Cache the queue relation id. * TODO: invalidation */ StartTransactionCommand(); QueueRelid = get_queue_table_oid(); originid = replorigin_by_name(MySubscription->slot_name, false); elog(DEBUG2, "setting up replication origin %s (oid %u)", MySubscription->slot_name, originid); replorigin_session_setup(originid); replorigin_session_origin = originid; origin_startpos = replorigin_session_get_progress(false); /* Start the replication. */ streamConn = pglogical_connect_replica(MySubscription->origin_if->dsn, MySubscription->name, NULL); repsets = stringlist_to_identifierstr(MySubscription->replication_sets); origins = stringlist_to_identifierstr(MySubscription->forward_origins); /* * IDENTIFY_SYSTEM sets up some internal state on walsender so call it even * if we don't (yet) want to use any of the results. 
*/ pglogical_identify_system(streamConn, NULL, NULL, NULL, NULL); pglogical_start_replication(streamConn, MySubscription->slot_name, origin_startpos, origins, repsets, NULL, MySubscription->force_text_transfer); pfree(repsets); CommitTransactionCommand(); /* * Do an initial leak check with reporting off; we don't want to see * these results, just the later output from ADDED leak checks. */ VALGRIND_DISABLE_ERROR_REPORTING; VALGRIND_DO_LEAK_CHECK; VALGRIND_ENABLE_ERROR_REPORTING; apply_work(streamConn); PQfinish(streamConn); /* We should only get here if we received sigTERM */ proc_exit(0); } pglogical-REL2_4_1/pglogical_apply.h000066400000000000000000000022451415142317000175040ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_apply.h * pglogical apply functions * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_apply.h * *------------------------------------------------------------------------- */ #ifndef PGLOGICAL_APPLY_H #define PGLOGICAL_APPLY_H #include "pglogical_relcache.h" #include "pglogical_proto_native.h" typedef void (*pglogical_apply_begin_fn) (void); typedef void (*pglogical_apply_commit_fn) (void); typedef void (*pglogical_apply_insert_fn) (PGLogicalRelation *rel, PGLogicalTupleData *newtup); typedef void (*pglogical_apply_update_fn) (PGLogicalRelation *rel, PGLogicalTupleData *oldtup, PGLogicalTupleData *newtup); typedef void (*pglogical_apply_delete_fn) (PGLogicalRelation *rel, PGLogicalTupleData *oldtup); typedef bool (*pglogical_apply_can_mi_fn) (PGLogicalRelation *rel); typedef void (*pglogical_apply_mi_add_tuple_fn) (PGLogicalRelation *rel, PGLogicalTupleData *tup); typedef void (*pglogical_apply_mi_finish_fn) (PGLogicalRelation *rel); #endif /* PGLOGICAL_APPLY_H */ 
pglogical-REL2_4_1/pglogical_apply_heap.c000066400000000000000000000662061415142317000205030ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_apply_heap.c * pglogical apply functions using heap api * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_apply_heap.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "libpq-fe.h" #include "pgstat.h" #include "access/htup_details.h" #include "access/xact.h" #include "catalog/namespace.h" #include "commands/dbcommands.h" #include "commands/sequence.h" #include "commands/tablecmds.h" #include "commands/trigger.h" #include "executor/executor.h" #include "libpq/pqformat.h" #include "mb/pg_wchar.h" #include "nodes/makefuncs.h" #include "nodes/parsenodes.h" #include "optimizer/clauses.h" #if PG_VERSION_NUM >= 120000 #include "optimizer/optimizer.h" #else #include "optimizer/planner.h" #endif #include "replication/origin.h" #include "replication/reorderbuffer.h" #include "rewrite/rewriteHandler.h" #include "storage/ipc.h" #include "storage/lmgr.h" #include "storage/proc.h" #include "tcop/pquery.h" #include "tcop/utility.h" #include "utils/builtins.h" #include "utils/int8.h" #include "utils/jsonb.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/snapmgr.h" #include "pglogical_conflict.h" #include "pglogical_executor.h" #include "pglogical_node.h" #include "pglogical_proto_native.h" #include "pglogical_queue.h" #include "pglogical_relcache.h" #include "pglogical_repset.h" #include "pglogical_rpc.h" #include "pglogical_sync.h" #include "pglogical_worker.h" #include "pglogical_apply_heap.h" typedef struct ApplyExecState { EState *estate; EPQState epqstate; ResultRelInfo *resultRelInfo; TupleTableSlot *slot; } ApplyExecState; /* State related to bulk insert */ typedef struct ApplyMIState { PGLogicalRelation *rel; 
ApplyExecState *aestate; CommandId cid; BulkInsertState bistate; #if PG_VERSION_NUM >= 120000 TupleTableSlot **buffered_tuples; #else HeapTuple *buffered_tuples; #endif int maxbuffered_tuples; int nbuffered_tuples; } ApplyMIState; #if PG_VERSION_NUM >= 120000 #define TTS_TUP(slot) (((HeapTupleTableSlot *)slot)->tuple) #else #define TTS_TUP(slot) (slot->tts_tuple) #endif static ApplyMIState *pglmistate = NULL; void pglogical_apply_heap_begin(void) { } void pglogical_apply_heap_commit(void) { } static List * UserTableUpdateOpenIndexes(ResultRelInfo *relinfo, EState *estate, TupleTableSlot *slot, bool update) { List *recheckIndexes = NIL; if (relinfo->ri_NumIndices > 0) { recheckIndexes = ExecInsertIndexTuples( #if PG_VERSION_NUM >= 140000 relinfo, #endif slot, #if PG_VERSION_NUM < 120000 &slot->tts_tuple->t_self, #endif estate #if PG_VERSION_NUM >= 140000 , update #endif #if PG_VERSION_NUM >= 90500 , false, NULL, NIL #endif ); /* FIXME: recheck the indexes */ if (recheckIndexes != NIL) { StringInfoData si; ListCell *lc; const char *idxname, *relname, *nspname; Relation target_rel = relinfo->ri_RelationDesc; relname = RelationGetRelationName(target_rel); nspname = get_namespace_name(RelationGetNamespace(target_rel)); initStringInfo(&si); foreach (lc, recheckIndexes) { Oid idxoid = lfirst_oid(lc); idxname = get_rel_name(idxoid); if (idxname == NULL) elog(ERROR, "cache lookup failed for index oid %u", idxoid); if (si.len > 0) appendStringInfoString(&si, ", "); appendStringInfoString(&si, quote_identifier(idxname)); } ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("pglogical doesn't support deferrable indexes"), errdetail("relation %s.%s has deferrable indexes: %s", quote_identifier(nspname), quote_identifier(relname), si.data))); } list_free(recheckIndexes); } return recheckIndexes; } static bool physatt_in_attmap(PGLogicalRelation *rel, int attid) { AttrNumber i; for (i = 0; i < rel->natts; i++) if (rel->attmap[i] == attid) return true; return false; } 
/* * Executes default values for columns for which we didn't get any data. * * TODO: this needs caching, it's not exactly fast. */ static void fill_missing_defaults(PGLogicalRelation *rel, EState *estate, PGLogicalTupleData *tuple) { TupleDesc desc = RelationGetDescr(rel->rel); AttrNumber num_phys_attrs = desc->natts; int i; AttrNumber attnum, num_defaults = 0; int *defmap; ExprState **defexprs; ExprContext *econtext; econtext = GetPerTupleExprContext(estate); /* We got all the data via replication, no need to evaluate anything. */ if (num_phys_attrs == rel->natts) return; defmap = (int *) palloc(num_phys_attrs * sizeof(int)); defexprs = (ExprState **) palloc(num_phys_attrs * sizeof(ExprState *)); for (attnum = 0; attnum < num_phys_attrs; attnum++) { Expr *defexpr; if (TupleDescAttr(desc,attnum)->attisdropped) continue; if (physatt_in_attmap(rel, attnum)) continue; defexpr = (Expr *) build_column_default(rel->rel, attnum + 1); if (defexpr != NULL) { /* Run the expression through planner */ defexpr = expression_planner(defexpr); /* Initialize executable expression in copycontext */ defexprs[num_defaults] = ExecInitExpr(defexpr, NULL); defmap[num_defaults] = attnum; num_defaults++; } } for (i = 0; i < num_defaults; i++) tuple->values[defmap[i]] = ExecEvalExpr(defexprs[i], econtext, &tuple->nulls[defmap[i]], NULL); } static ApplyExecState * init_apply_exec_state(PGLogicalRelation *rel) { ApplyExecState *aestate = palloc0(sizeof(ApplyExecState)); /* Initialize the executor state. 
*/ aestate->estate = create_estate_for_relation(rel->rel, true); aestate->resultRelInfo = makeNode(ResultRelInfo); InitResultRelInfo(aestate->resultRelInfo, rel->rel, 1, 0); #if PG_VERSION_NUM < 140000 aestate->estate->es_result_relations = aestate->resultRelInfo; aestate->estate->es_num_result_relations = 1; aestate->estate->es_result_relation_info = aestate->resultRelInfo; #endif aestate->slot = ExecInitExtraTupleSlot(aestate->estate); ExecSetSlotDescriptor(aestate->slot, RelationGetDescr(rel->rel)); if (aestate->resultRelInfo->ri_TrigDesc) EvalPlanQualInit(&aestate->epqstate, aestate->estate, NULL, NIL, -1); /* Prepare to catch AFTER triggers. */ AfterTriggerBeginQuery(); return aestate; } static void finish_apply_exec_state(ApplyExecState *aestate) { /* Close indexes */ ExecCloseIndices(aestate->resultRelInfo); /* Handle queued AFTER triggers. */ AfterTriggerEndQuery(aestate->estate); /* Terminate EPQ execution if active. */ if (aestate->resultRelInfo->ri_TrigDesc) EvalPlanQualEnd(&aestate->epqstate); /* Cleanup tuple table. */ ExecResetTupleTable(aestate->estate->es_tupleTable, true); /* Free the memory. */ FreeExecutorState(aestate->estate); pfree(aestate); } /* * Handle insert via low level api. */ void pglogical_apply_heap_insert(PGLogicalRelation *rel, PGLogicalTupleData *newtup) { ApplyExecState *aestate; Oid conflicts_idx_id; TupleTableSlot *localslot; HeapTuple remotetuple; HeapTuple applytuple; PGLogicalConflictResolution resolution; List *recheckIndexes = NIL; MemoryContext oldctx; bool has_before_triggers = false; /* Initialize the executor state. 
*/ aestate = init_apply_exec_state(rel); #if PG_VERSION_NUM >= 120000 localslot = table_slot_create(rel->rel, &aestate->estate->es_tupleTable); #else localslot = ExecInitExtraTupleSlot(aestate->estate); ExecSetSlotDescriptor(localslot, RelationGetDescr(rel->rel)); #endif ExecOpenIndices(aestate->resultRelInfo #if PG_VERSION_NUM >= 90500 , false #endif ); /* * Check for existing tuple with same key in any unique index containing * only normal columns. This doesn't just check the replica identity index, * but it'll prefer it and use it first. */ conflicts_idx_id = pglogical_tuple_find_conflict(aestate->resultRelInfo, newtup, localslot); /* Process and store remote tuple in the slot */ oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(aestate->estate)); fill_missing_defaults(rel, aestate->estate, newtup); remotetuple = heap_form_tuple(RelationGetDescr(rel->rel), newtup->values, newtup->nulls); MemoryContextSwitchTo(oldctx); ExecStoreHeapTuple(remotetuple, aestate->slot, true); if (aestate->resultRelInfo->ri_TrigDesc && aestate->resultRelInfo->ri_TrigDesc->trig_insert_before_row) { has_before_triggers = true; #if PG_VERSION_NUM >= 120000 if (!ExecBRInsertTriggers(aestate->estate, aestate->resultRelInfo, aestate->slot)) #else aestate->slot = ExecBRInsertTriggers(aestate->estate, aestate->resultRelInfo, aestate->slot); if (aestate->slot == NULL) /* "do nothing" */ #endif { finish_apply_exec_state(aestate); return; } } /* trigger might have changed tuple */ #if PG_VERSION_NUM >= 120000 remotetuple = ExecFetchSlotHeapTuple(aestate->slot, true, NULL); #else remotetuple = ExecMaterializeSlot(aestate->slot); #endif /* Did we find matching key in any candidate-key index? */ if (OidIsValid(conflicts_idx_id)) { TransactionId xmin; TimestampTz local_ts; RepOriginId local_origin; bool apply; bool local_origin_found; local_origin_found = get_tuple_origin(TTS_TUP(localslot), &xmin, &local_origin, &local_ts); /* Tuple already exists, try resolving conflict. 
*/ apply = try_resolve_conflict(rel->rel, TTS_TUP(localslot), remotetuple, &applytuple, &resolution); pglogical_report_conflict(CONFLICT_INSERT_INSERT, rel, TTS_TUP(localslot), NULL, remotetuple, applytuple, resolution, xmin, local_origin_found, local_origin, local_ts, conflicts_idx_id, has_before_triggers); if (apply) { #if PG_VERSION_NUM >= 120000 bool update_indexes; #endif if (applytuple != remotetuple) ExecStoreHeapTuple(applytuple, aestate->slot, false); if (aestate->resultRelInfo->ri_TrigDesc && aestate->resultRelInfo->ri_TrigDesc->trig_update_before_row) { #if PG_VERSION_NUM >= 120000 if (!ExecBRUpdateTriggers(aestate->estate, &aestate->epqstate, aestate->resultRelInfo, &(TTS_TUP(localslot)->t_self), NULL, aestate->slot)) #else aestate->slot = ExecBRUpdateTriggers(aestate->estate, &aestate->epqstate, aestate->resultRelInfo, &(TTS_TUP(localslot)->t_self), NULL, aestate->slot); if (aestate->slot == NULL) /* "do nothing" */ #endif { finish_apply_exec_state(aestate); return; } } /* trigger might have changed tuple */ #if PG_VERSION_NUM >= 120000 remotetuple = ExecFetchSlotHeapTuple(aestate->slot, true, NULL); #else remotetuple = ExecMaterializeSlot(aestate->slot); #endif /* Check the constraints of the tuple */ if (rel->rel->rd_att->constr) ExecConstraints(aestate->resultRelInfo, aestate->slot, aestate->estate); #if PG_VERSION_NUM >= 120000 simple_table_tuple_update(rel->rel, &(localslot->tts_tid), aestate->slot, aestate->estate->es_snapshot, &update_indexes); if (update_indexes) #else simple_heap_update(rel->rel, &(TTS_TUP(localslot)->t_self), TTS_TUP(aestate->slot)); if (!HeapTupleIsHeapOnly(TTS_TUP(aestate->slot))) #endif recheckIndexes = UserTableUpdateOpenIndexes(aestate->resultRelInfo, aestate->estate, aestate->slot, true); /* AFTER ROW UPDATE Triggers */ #if PG_VERSION_NUM >= 120000 ExecARUpdateTriggers(aestate->estate, aestate->resultRelInfo, &(TTS_TUP(localslot)->t_self), NULL, aestate->slot, recheckIndexes); #else ExecARUpdateTriggers(aestate->estate, 
aestate->resultRelInfo, &(TTS_TUP(localslot)->t_self), NULL, applytuple, recheckIndexes); #endif } } else { /* Check the constraints of the tuple */ if (rel->rel->rd_att->constr) ExecConstraints(aestate->resultRelInfo, aestate->slot, aestate->estate); #if PG_VERSION_NUM >= 120000 simple_table_tuple_insert(aestate->resultRelInfo->ri_RelationDesc, aestate->slot); #else simple_heap_insert(rel->rel, TTS_TUP(aestate->slot)); #endif UserTableUpdateOpenIndexes(aestate->resultRelInfo, aestate->estate, aestate->slot, false); /* AFTER ROW INSERT Triggers */ #if PG_VERSION_NUM >= 120000 ExecARInsertTriggers(aestate->estate, aestate->resultRelInfo, aestate->slot, recheckIndexes); #else ExecARInsertTriggers(aestate->estate, aestate->resultRelInfo, remotetuple, recheckIndexes); #endif } finish_apply_exec_state(aestate); CommandCounterIncrement(); } /* * Handle update via low level api. */ void pglogical_apply_heap_update(PGLogicalRelation *rel, PGLogicalTupleData *oldtup, PGLogicalTupleData *newtup) { ApplyExecState *aestate; bool found; TupleTableSlot *localslot; HeapTuple remotetuple; List *recheckIndexes = NIL; MemoryContext oldctx; Oid replident_idx_id; bool has_before_triggers = false; /* Initialize the executor state. */ aestate = init_apply_exec_state(rel); #if PG_VERSION_NUM >= 120000 localslot = table_slot_create(rel->rel, &aestate->estate->es_tupleTable); #else localslot = ExecInitExtraTupleSlot(aestate->estate); ExecSetSlotDescriptor(localslot, RelationGetDescr(rel->rel)); #endif /* Search for existing tuple with same key */ found = pglogical_tuple_find_replidx(aestate->resultRelInfo, oldtup, localslot, &replident_idx_id); /* * Tuple found, update the local tuple. * * Note this will fail if there are other unique indexes and one or more of * them would be violated by the new tuple. 
*/ if (found) { TransactionId xmin; TimestampTz local_ts; RepOriginId local_origin; bool local_origin_found; bool apply; HeapTuple applytuple; /* Process and store remote tuple in the slot */ oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(aestate->estate)); fill_missing_defaults(rel, aestate->estate, newtup); remotetuple = heap_modify_tuple(TTS_TUP(localslot), RelationGetDescr(rel->rel), newtup->values, newtup->nulls, newtup->changed); MemoryContextSwitchTo(oldctx); ExecStoreHeapTuple(remotetuple, aestate->slot, true); if (aestate->resultRelInfo->ri_TrigDesc && aestate->resultRelInfo->ri_TrigDesc->trig_update_before_row) { has_before_triggers = true; #if PG_VERSION_NUM >= 120000 if (!ExecBRUpdateTriggers(aestate->estate, &aestate->epqstate, aestate->resultRelInfo, &(TTS_TUP(localslot)->t_self), NULL, aestate->slot)) #else aestate->slot = ExecBRUpdateTriggers(aestate->estate, &aestate->epqstate, aestate->resultRelInfo, &(TTS_TUP(localslot)->t_self), NULL, aestate->slot); if (aestate->slot == NULL) /* "do nothing" */ #endif { finish_apply_exec_state(aestate); return; } } /* trigger might have changed tuple */ #if PG_VERSION_NUM >= 120000 remotetuple = ExecFetchSlotHeapTuple(aestate->slot, true, NULL); #else remotetuple = ExecMaterializeSlot(aestate->slot); #endif local_origin_found = get_tuple_origin(TTS_TUP(localslot), &xmin, &local_origin, &local_ts); /* * If the local tuple was previously updated by different transaction * on different server, consider this to be conflict and resolve it. 
*/ if (local_origin_found && xmin != GetTopTransactionId() && local_origin != replorigin_session_origin) { PGLogicalConflictResolution resolution; apply = try_resolve_conflict(rel->rel, TTS_TUP(localslot), remotetuple, &applytuple, &resolution); pglogical_report_conflict(CONFLICT_UPDATE_UPDATE, rel, TTS_TUP(localslot), oldtup, remotetuple, applytuple, resolution, xmin, local_origin_found, local_origin, local_ts, replident_idx_id, has_before_triggers); if (applytuple != remotetuple) ExecStoreHeapTuple(applytuple, aestate->slot, false); } else { apply = true; applytuple = remotetuple; } if (apply) { #if PG_VERSION_NUM >= 120000 bool update_indexes; #endif /* Check the constraints of the tuple */ if (rel->rel->rd_att->constr) ExecConstraints(aestate->resultRelInfo, aestate->slot, aestate->estate); #if PG_VERSION_NUM >= 120000 simple_table_tuple_update(rel->rel, &(localslot->tts_tid), aestate->slot, aestate->estate->es_snapshot, &update_indexes); if (update_indexes) #else simple_heap_update(rel->rel, &(TTS_TUP(localslot)->t_self), TTS_TUP(aestate->slot)); /* Only update indexes if it's not HOT update. */ if (!HeapTupleIsHeapOnly(TTS_TUP(aestate->slot))) #endif { ExecOpenIndices(aestate->resultRelInfo #if PG_VERSION_NUM >= 90500 , false #endif ); recheckIndexes = UserTableUpdateOpenIndexes(aestate->resultRelInfo, aestate->estate, aestate->slot, true); } /* AFTER ROW UPDATE Triggers */ #if PG_VERSION_NUM >= 120000 ExecARUpdateTriggers(aestate->estate, aestate->resultRelInfo, &(TTS_TUP(localslot)->t_self), NULL, aestate->slot, recheckIndexes); #else ExecARUpdateTriggers(aestate->estate, aestate->resultRelInfo, &(TTS_TUP(localslot)->t_self), NULL, applytuple, recheckIndexes); #endif } } else { /* * The tuple to be updated could not be found. * * We can't do INSERT here because we might not have whole tuple. 
*/ remotetuple = heap_form_tuple(RelationGetDescr(rel->rel), newtup->values, newtup->nulls); pglogical_report_conflict(CONFLICT_UPDATE_DELETE, rel, NULL, oldtup, remotetuple, NULL, PGLogicalResolution_Skip, InvalidTransactionId, false, InvalidRepOriginId, (TimestampTz)0, replident_idx_id, has_before_triggers); } /* Cleanup. */ finish_apply_exec_state(aestate); CommandCounterIncrement(); } /* * Handle delete via low level api. */ void pglogical_apply_heap_delete(PGLogicalRelation *rel, PGLogicalTupleData *oldtup) { ApplyExecState *aestate; TupleTableSlot *localslot; Oid replident_idx_id; bool has_before_triggers = false; /* Initialize the executor state. */ aestate = init_apply_exec_state(rel); #if PG_VERSION_NUM >= 120000 localslot = table_slot_create(rel->rel, &aestate->estate->es_tupleTable); #else localslot = ExecInitExtraTupleSlot(aestate->estate); ExecSetSlotDescriptor(localslot, RelationGetDescr(rel->rel)); #endif if (pglogical_tuple_find_replidx(aestate->resultRelInfo, oldtup, localslot, &replident_idx_id)) { if (aestate->resultRelInfo->ri_TrigDesc && aestate->resultRelInfo->ri_TrigDesc->trig_delete_before_row) { bool dodelete = ExecBRDeleteTriggers(aestate->estate, &aestate->epqstate, aestate->resultRelInfo, &(TTS_TUP(localslot)->t_self), NULL); has_before_triggers = true; if (!dodelete) /* "do nothing" */ { finish_apply_exec_state(aestate); return; } } /* Tuple found, delete it. */ simple_heap_delete(rel->rel, &(TTS_TUP(localslot)->t_self)); /* AFTER ROW DELETE Triggers */ ExecARDeleteTriggers(aestate->estate, aestate->resultRelInfo, &(TTS_TUP(localslot)->t_self), NULL); } else { /* The tuple to be deleted could not be found. */ HeapTuple remotetuple = heap_form_tuple(RelationGetDescr(rel->rel), oldtup->values, oldtup->nulls); pglogical_report_conflict(CONFLICT_DELETE_DELETE, rel, NULL, oldtup, remotetuple, NULL, PGLogicalResolution_Skip, InvalidTransactionId, false, InvalidRepOriginId, (TimestampTz)0, replident_idx_id, has_before_triggers); } /* Cleanup. 
*/ finish_apply_exec_state(aestate); CommandCounterIncrement(); } bool pglogical_apply_heap_can_mi(PGLogicalRelation *rel) { /* Multi insert is only supported when conflicts result in errors. */ return pglogical_conflict_resolver == PGLOGICAL_RESOLVE_ERROR; } /* * MultiInsert initialization. */ static void pglogical_apply_heap_mi_start(PGLogicalRelation *rel) { MemoryContext oldctx; ApplyExecState *aestate; ResultRelInfo *resultRelInfo; TupleDesc desc; bool volatile_defexprs = false; if (pglmistate && pglmistate->rel == rel) return; if (pglmistate && pglmistate->rel != rel) pglogical_apply_heap_mi_finish(pglmistate->rel); oldctx = MemoryContextSwitchTo(TopTransactionContext); /* Initialize new MultiInsert state. */ pglmistate = palloc0(sizeof(ApplyMIState)); pglmistate->rel = rel; /* Initialize the executor state. */ pglmistate->aestate = aestate = init_apply_exec_state(rel); MemoryContextSwitchTo(TopTransactionContext); resultRelInfo = aestate->resultRelInfo; ExecOpenIndices(resultRelInfo #if PG_VERSION_NUM >= 90500 , false #endif ); /* Check if table has any volatile default expressions. */ desc = RelationGetDescr(rel->rel); if (desc->natts != rel->natts) { int attnum; for (attnum = 0; attnum < desc->natts; attnum++) { Expr *defexpr; if (TupleDescAttr(desc,attnum)->attisdropped) continue; defexpr = (Expr *) build_column_default(rel->rel, attnum + 1); if (defexpr != NULL) { /* Run the expression through planner */ defexpr = expression_planner(defexpr); volatile_defexprs = contain_volatile_functions_not_nextval((Node *) defexpr); if (volatile_defexprs) break; } } } /* * Decide if to buffer tuples based on the collected information * about the table. 
*/ if ((resultRelInfo->ri_TrigDesc != NULL && (resultRelInfo->ri_TrigDesc->trig_insert_before_row || resultRelInfo->ri_TrigDesc->trig_insert_instead_row)) || volatile_defexprs) { pglmistate->maxbuffered_tuples = 1; } else { pglmistate->maxbuffered_tuples = 1000; } pglmistate->cid = GetCurrentCommandId(true); pglmistate->bistate = GetBulkInsertState(); /* Make the space for buffer. */ #if PG_VERSION_NUM >= 120000 pglmistate->buffered_tuples = palloc0(pglmistate->maxbuffered_tuples * sizeof(TupleTableSlot *)); #else pglmistate->buffered_tuples = palloc0(pglmistate->maxbuffered_tuples * sizeof(HeapTuple)); #endif pglmistate->nbuffered_tuples = 0; MemoryContextSwitchTo(oldctx); } /* Write the buffered tuples. */ static void pglogical_apply_heap_mi_flush(void) { MemoryContext oldctx; ResultRelInfo *resultRelInfo; int i; if (!pglmistate || pglmistate->nbuffered_tuples == 0) return; oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(pglmistate->aestate->estate)); heap_multi_insert(pglmistate->rel->rel, pglmistate->buffered_tuples, pglmistate->nbuffered_tuples, pglmistate->cid, 0, /* hi_options */ pglmistate->bistate); MemoryContextSwitchTo(oldctx); resultRelInfo = pglmistate->aestate->resultRelInfo; /* * If there are any indexes, update them for all the inserted tuples, and * run AFTER ROW INSERT triggers. 
*/ if (resultRelInfo->ri_NumIndices > 0) { for (i = 0; i < pglmistate->nbuffered_tuples; i++) { List *recheckIndexes = NIL; #if PG_VERSION_NUM < 120000 ExecStoreTuple(pglmistate->buffered_tuples[i], pglmistate->aestate->slot, InvalidBuffer, false); #endif recheckIndexes = ExecInsertIndexTuples( #if PG_VERSION_NUM >= 140000 resultRelInfo, #endif #if PG_VERSION_NUM >= 120000 pglmistate->buffered_tuples[i], #else pglmistate->aestate->slot, &(pglmistate->buffered_tuples[i]->t_self), #endif pglmistate->aestate->estate #if PG_VERSION_NUM >= 90500 #if PG_VERSION_NUM >= 140000 , false #endif , false, NULL, NIL #endif ); ExecARInsertTriggers(pglmistate->aestate->estate, resultRelInfo, pglmistate->buffered_tuples[i], recheckIndexes); list_free(recheckIndexes); } } /* * There's no indexes, but see if we need to run AFTER ROW INSERT triggers * anyway. */ else if (resultRelInfo->ri_TrigDesc != NULL && resultRelInfo->ri_TrigDesc->trig_insert_after_row) { for (i = 0; i < pglmistate->nbuffered_tuples; i++) { ExecARInsertTriggers(pglmistate->aestate->estate, resultRelInfo, pglmistate->buffered_tuples[i], NIL); } } pglmistate->nbuffered_tuples = 0; } /* Add tuple to the MultiInsert. */ void pglogical_apply_heap_mi_add_tuple(PGLogicalRelation *rel, PGLogicalTupleData *tup) { MemoryContext oldctx; ApplyExecState *aestate; HeapTuple remotetuple; TupleTableSlot *slot; pglogical_apply_heap_mi_start(rel); /* * If sufficient work is pending, process that first */ if (pglmistate->nbuffered_tuples >= pglmistate->maxbuffered_tuples) pglogical_apply_heap_mi_flush(); /* Process and store remote tuple in the slot */ aestate = pglmistate->aestate; if (pglmistate->nbuffered_tuples == 0) { /* * Reset the per-tuple exprcontext. We can only do this if the * tuple buffer is empty. (Calling the context the per-tuple * memory context is a bit of a misnomer now.) 
*/ ResetPerTupleExprContext(aestate->estate); } oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(aestate->estate)); fill_missing_defaults(rel, aestate->estate, tup); remotetuple = heap_form_tuple(RelationGetDescr(rel->rel), tup->values, tup->nulls); MemoryContextSwitchTo(TopTransactionContext); slot = aestate->slot; /* Store the tuple in slot, but make sure it's not freed. */ ExecStoreHeapTuple(remotetuple, slot, false); if (aestate->resultRelInfo->ri_TrigDesc && aestate->resultRelInfo->ri_TrigDesc->trig_insert_before_row) { #if PG_VERSION_NUM >= 120000 if (!ExecBRInsertTriggers(aestate->estate, aestate->resultRelInfo, slot)) #else slot = ExecBRInsertTriggers(aestate->estate, aestate->resultRelInfo, slot); if (slot == NULL) #endif { MemoryContextSwitchTo(oldctx); return; } #if PG_VERSION_NUM < 120000 else remotetuple = ExecMaterializeSlot(slot); #endif } /* Check the constraints of the tuple */ if (rel->rel->rd_att->constr) ExecConstraints(aestate->resultRelInfo, slot, aestate->estate); #if PG_VERSION_NUM >= 120000 if (pglmistate->buffered_tuples[pglmistate->nbuffered_tuples] == NULL) pglmistate->buffered_tuples[pglmistate->nbuffered_tuples] = table_slot_create(rel->rel, NULL); else ExecClearTuple(pglmistate->buffered_tuples[pglmistate->nbuffered_tuples]); ExecCopySlot(pglmistate->buffered_tuples[pglmistate->nbuffered_tuples], slot); #else pglmistate->buffered_tuples[pglmistate->nbuffered_tuples] = remotetuple; #endif pglmistate->nbuffered_tuples++; MemoryContextSwitchTo(oldctx); } void pglogical_apply_heap_mi_finish(PGLogicalRelation *rel) { if (!pglmistate) return; Assert(pglmistate->rel == rel); pglogical_apply_heap_mi_flush(); FreeBulkInsertState(pglmistate->bistate); finish_apply_exec_state(pglmistate->aestate); #if PG_VERSION_NUM >= 120000 for (int i = 0; i < pglmistate->maxbuffered_tuples; i++) if (pglmistate->buffered_tuples[i]) ExecDropSingleTupleTableSlot(pglmistate->buffered_tuples[i]); #endif pfree(pglmistate->buffered_tuples); pfree(pglmistate); 
pglmistate = NULL; } pglogical-REL2_4_1/pglogical_apply_heap.h000066400000000000000000000022271415142317000205010ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_apply_heap.h * pglogical apply functions using heap api * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_apply_heap.h * *------------------------------------------------------------------------- */ #ifndef PGLOGICAL_APPLY_HEAP_H #define PGLOGICAL_APPLY_HEAP_H #include "pglogical_relcache.h" #include "pglogical_proto_native.h" extern void pglogical_apply_heap_begin(void); extern void pglogical_apply_heap_commit(void); extern void pglogical_apply_heap_insert(PGLogicalRelation *rel, PGLogicalTupleData *newtup); extern void pglogical_apply_heap_update(PGLogicalRelation *rel, PGLogicalTupleData *oldtup, PGLogicalTupleData *newtup); extern void pglogical_apply_heap_delete(PGLogicalRelation *rel, PGLogicalTupleData *oldtup); bool pglogical_apply_heap_can_mi(PGLogicalRelation *rel); void pglogical_apply_heap_mi_add_tuple(PGLogicalRelation *rel, PGLogicalTupleData *tup); void pglogical_apply_heap_mi_finish(PGLogicalRelation *rel); #endif /* PGLOGICAL_APPLY_HEAP_H */ pglogical-REL2_4_1/pglogical_apply_spi.c000066400000000000000000000444211415142317000203540ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_apply_spi.c * pglogical apply functions using SPI * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_apply_spi.c * * NOTES * The multi-insert support is not done through SPI but using binary * COPY through a pipe (virtual or file). 
* *------------------------------------------------------------------------- */ #include #include #include "postgres.h" #include "access/htup_details.h" #include "access/sysattr.h" #include "access/xact.h" #include "commands/copy.h" #include "executor/executor.h" #include "executor/spi.h" #include "replication/reorderbuffer.h" #include "storage/fd.h" #include "tcop/pquery.h" #include "tcop/utility.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/rel.h" #include "utils/builtins.h" #include "pglogical.h" #include "pglogical_apply_spi.h" #include "pglogical_conflict.h" /* State related to bulk insert */ typedef struct pglogical_copyState { PGLogicalRelation *rel; StringInfo copy_stmt; List *copy_parsetree; File copy_file; char copy_mechanism; FILE *copy_read_file; FILE *copy_write_file; StringInfo msgbuf; MemoryContext rowcontext; FmgrInfo *out_functions; List *attnumlist; int copy_buffered_tuples; size_t copy_buffered_size; } pglogical_copyState; static pglogical_copyState *pglcstate = NULL; static const char BinarySignature[11] = "PGCOPY\n\377\r\n\0"; static void pglogical_start_copy(PGLogicalRelation *rel); static void pglogical_proccess_copy(pglogical_copyState *pglcstate); static void pglogical_copySendData(pglogical_copyState *pglcstate, const void *databuf, int datasize); static void pglogical_copySendEndOfRow(pglogical_copyState *pglcstate); static void pglogical_copySendInt32(pglogical_copyState *pglcstate, int32 val); static void pglogical_copySendInt16(pglogical_copyState *pglcstate, int16 val); static void pglogical_copyOneRowTo(pglogical_copyState *pglcstate, Datum *values, bool *nulls); /* * Handle begin (connect SPI). */ void pglogical_apply_spi_begin(void) { if (SPI_connect() != SPI_OK_CONNECT) elog(ERROR, "SPI_connect failed"); MemoryContextSwitchTo(MessageContext); } /* * Handle commit (finish SPI). 
*/ void pglogical_apply_spi_commit(void) { if (SPI_finish() != SPI_OK_FINISH) elog(ERROR, "SPI_finish failed"); MemoryContextSwitchTo(MessageContext); } /* * Handle insert via SPI. */ void pglogical_apply_spi_insert(PGLogicalRelation *rel, PGLogicalTupleData *newtup) { TupleDesc desc = RelationGetDescr(rel->rel); Oid argtypes[MaxTupleAttributeNumber]; Datum values[MaxTupleAttributeNumber]; char nulls[MaxTupleAttributeNumber]; StringInfoData cmd; int att, narg; initStringInfo(&cmd); appendStringInfo(&cmd, "INSERT INTO %s (", quote_qualified_identifier(rel->nspname, rel->relname)); for (att = 0, narg = 0; att < desc->natts; att++) { if (TupleDescAttr(desc,att)->attisdropped) continue; if (!newtup->changed[att]) continue; if (narg > 0) appendStringInfo(&cmd, ", %s", quote_identifier(NameStr(TupleDescAttr(desc,att)->attname))); else appendStringInfo(&cmd, "%s", quote_identifier(NameStr(TupleDescAttr(desc,att)->attname))); narg++; } appendStringInfoString(&cmd, ") VALUES ("); for (att = 0, narg = 0; att < desc->natts; att++) { if (TupleDescAttr(desc,att)->attisdropped) continue; if (!newtup->changed[att]) continue; if (narg > 0) appendStringInfo(&cmd, ", $%u", narg + 1); else appendStringInfo(&cmd, "$%u", narg + 1); argtypes[narg] = TupleDescAttr(desc,att)->atttypid; values[narg] = newtup->values[att]; nulls[narg] = newtup->nulls[att] ? 'n' : ' '; narg++; } appendStringInfoString(&cmd, ")"); if (SPI_execute_with_args(cmd.data, narg, argtypes, values, nulls, false, 0) != SPI_OK_INSERT) elog(ERROR, "SPI_execute_with_args failed"); MemoryContextSwitchTo(MessageContext); pfree(cmd.data); } /* * Handle update via SPI. 
*/ void pglogical_apply_spi_update(PGLogicalRelation *rel, PGLogicalTupleData *oldtup, PGLogicalTupleData *newtup) { TupleDesc desc = RelationGetDescr(rel->rel); Oid argtypes[MaxTupleAttributeNumber]; Datum values[MaxTupleAttributeNumber]; char nulls[MaxTupleAttributeNumber]; StringInfoData cmd; Bitmapset *id_attrs; int att, narg, firstarg; id_attrs = RelationGetIndexAttrBitmap(rel->rel, INDEX_ATTR_BITMAP_IDENTITY_KEY); initStringInfo(&cmd); appendStringInfo(&cmd, "UPDATE %s SET ", quote_qualified_identifier(rel->nspname, rel->relname)); for (att = 0, narg = 0; att < desc->natts; att++) { if (TupleDescAttr(desc,att)->attisdropped) continue; if (!newtup->changed[att]) continue; if (narg > 0) appendStringInfo(&cmd, ", %s = $%u", quote_identifier(NameStr(TupleDescAttr(desc,att)->attname)), narg + 1); else appendStringInfo(&cmd, "%s = $%u", quote_identifier(NameStr(TupleDescAttr(desc,att)->attname)), narg + 1); argtypes[narg] = TupleDescAttr(desc,att)->atttypid; values[narg] = newtup->values[att]; nulls[narg] = newtup->nulls[att] ? 'n' : ' '; narg++; } appendStringInfoString(&cmd, " WHERE"); firstarg = narg; for (att = 0; att < desc->natts; att++) { if (!bms_is_member(TupleDescAttr(desc,att)->attnum - FirstLowInvalidHeapAttributeNumber, id_attrs)) continue; if (narg > firstarg) appendStringInfo(&cmd, " AND %s = $%u", quote_identifier(NameStr(TupleDescAttr(desc,att)->attname)), narg + 1); else appendStringInfo(&cmd, " %s = $%u", quote_identifier(NameStr(TupleDescAttr(desc,att)->attname)), narg + 1); argtypes[narg] = TupleDescAttr(desc,att)->atttypid; values[narg] = oldtup->values[att]; nulls[narg] = oldtup->nulls[att] ? 'n' : ' '; narg++; } if (SPI_execute_with_args(cmd.data, narg, argtypes, values, nulls, false, 0) != SPI_OK_UPDATE) elog(ERROR, "SPI_execute_with_args failed"); MemoryContextSwitchTo(MessageContext); pfree(cmd.data); } /* * Handle delete via SPI. 
*/ void pglogical_apply_spi_delete(PGLogicalRelation *rel, PGLogicalTupleData *oldtup) { TupleDesc desc = RelationGetDescr(rel->rel); Oid argtypes[MaxTupleAttributeNumber]; Datum values[MaxTupleAttributeNumber]; char nulls[MaxTupleAttributeNumber]; StringInfoData cmd; Bitmapset *id_attrs; int att, narg; id_attrs = RelationGetIndexAttrBitmap(rel->rel, INDEX_ATTR_BITMAP_IDENTITY_KEY); initStringInfo(&cmd); appendStringInfo(&cmd, "DELETE FROM %s WHERE", quote_qualified_identifier(rel->nspname, rel->relname)); for (att = 0, narg = 0; att < desc->natts; att++) { if (!bms_is_member(TupleDescAttr(desc,att)->attnum - FirstLowInvalidHeapAttributeNumber, id_attrs)) continue; if (narg > 0) appendStringInfo(&cmd, " AND %s = $%u", quote_identifier(NameStr(TupleDescAttr(desc,att)->attname)), narg + 1); else appendStringInfo(&cmd, " %s = $%u", quote_identifier(NameStr(TupleDescAttr(desc,att)->attname)), narg + 1); argtypes[narg] = TupleDescAttr(desc,att)->atttypid; values[narg] = oldtup->values[att]; nulls[narg] = oldtup->nulls[att] ? 'n' : ' '; narg++; } if (SPI_execute_with_args(cmd.data, narg, argtypes, values, nulls, false, 0) != SPI_OK_DELETE) elog(ERROR, "SPI_execute_with_args failed"); MemoryContextSwitchTo(MessageContext); pfree(cmd.data); } /* We currently can't support multi insert using COPY on windows. */ #if !defined(WIN32) && !defined(PGL_NO_STDIN_ASSIGN) bool pglogical_apply_spi_can_mi(PGLogicalRelation *rel) { /* Multi insert is only supported when conflicts result in errors. 
*/ return pglogical_conflict_resolver == PGLOGICAL_RESOLVE_ERROR; } void pglogical_apply_spi_mi_add_tuple(PGLogicalRelation *rel, PGLogicalTupleData *tup) { Datum *values; bool *nulls; /* Start COPY if not already done so */ pglogical_start_copy(rel); #define MAX_BUFFERED_TUPLES 10000 #define MAX_BUFFER_SIZE 60000 /* * If sufficient work is pending, process that first */ if (pglcstate->copy_buffered_tuples > MAX_BUFFERED_TUPLES || pglcstate->copy_buffered_size > MAX_BUFFER_SIZE) { pglogical_apply_spi_mi_finish(rel); pglogical_start_copy(rel); } /* * Write the tuple to the COPY stream. */ values = (Datum *) tup->values; nulls = (bool *) tup->nulls; pglogical_copyOneRowTo(pglcstate, values, nulls); } /* * Initialize copy state for reation. */ static void pglogical_start_copy(PGLogicalRelation *rel) { MemoryContext oldcontext; TupleDesc desc; ListCell *cur; int num_phys_attrs; char *delim; StringInfoData attrnames; int i; /* We are already doing COPY for requested relation, nothing to do. */ if (pglcstate && pglcstate->rel == rel) return; /* We are in COPY but for different relation, finish it first. */ if (pglcstate && pglcstate->rel != rel) pglogical_apply_spi_mi_finish(pglcstate->rel); oldcontext = MemoryContextSwitchTo(TopTransactionContext); /* Initialize new COPY state. */ pglcstate = palloc0(sizeof(pglogical_copyState)); pglcstate->copy_file = -1; pglcstate->msgbuf = makeStringInfo(); pglcstate->rowcontext = AllocSetContextCreate(CurrentMemoryContext, "COPY TO", ALLOCSET_DEFAULT_SIZES); pglcstate->rel = rel; for (i = 0; i < rel->natts; i++) pglcstate->attnumlist = lappend_int(pglcstate->attnumlist, rel->attmap[i]); desc = RelationGetDescr(rel->rel); num_phys_attrs = desc->natts; /* Get info about the columns we need to process. 
*/ pglcstate->out_functions = (FmgrInfo *) palloc(num_phys_attrs * sizeof(FmgrInfo)); /* Get attribute list in a CSV form */ initStringInfo(&attrnames); delim = ""; /* * Now that we have a list of attributes from the remote side and their * mapping to our side, build a COPY statement that can be parsed and * executed later to bulk load the incoming tuples. */ foreach(cur, pglcstate->attnumlist) { int attnum = lfirst_int(cur); Oid out_func_oid; bool isvarlena; getTypeBinaryOutputInfo(TupleDescAttr(desc,attnum)->atttypid, &out_func_oid, &isvarlena); fmgr_info(out_func_oid, &pglcstate->out_functions[attnum]); appendStringInfo(&attrnames, "%s %s", delim, quote_identifier(NameStr(TupleDescAttr(desc,attnum)->attname))); delim = ", "; } pglcstate->copy_stmt = makeStringInfo(); appendStringInfo(pglcstate->copy_stmt, "COPY %s.%s (%s) FROM STDIN " "WITH (FORMAT BINARY)", quote_identifier(rel->nspname), quote_identifier(rel->relname), attrnames.data); pfree(attrnames.data); /* * This is a bit of kludge to let COPY FROM read from the STDIN. In * pglogical, the apply worker is accumulating tuples received from the * publisher and queueing them for a bulk load. But the COPY API can only * deal with either a file or a PROGRAM or STDIN. * * We could either use pipe-based implementation where the apply worker * first writes to one end of the pipe and later reads from the other end. * But pipe's internal buffer is limited in size and hence we cannot * accumulate much data without writing it out to the table. * * The temporary file based implementation is more flexible. The only * disadvantage being that the data may get written to the disk and that * may cause performance issues. * * A more ideal solution would be to teach COPY to write to and read from a * buffer. But that will require changes to the in-core COPY * infrastructure. Instead, we setup things such that a pipe is created * between STDIN and a unnamed stream. 
The tuples are written to the one * end of the pipe and read back from the other end. Since we can fiddle * with the existing STDIN, we assign the read end of the pipe to STDIN. * * This seems ok since the apply worker being a background worker is not * going to read anything from the STDIN normally. So our highjacking of * the stream seems ok. */ if (pglcstate->copy_file == -1) pglcstate->copy_file = OpenTemporaryFile(true); Assert(pglcstate->copy_file > 0); pglcstate->copy_write_file = fopen(FilePathName(pglcstate->copy_file), "w"); pglcstate->copy_read_file = fopen(FilePathName(pglcstate->copy_file), "r"); pglcstate->copy_mechanism = 'f'; pglcstate->copy_parsetree = pg_parse_query(pglcstate->copy_stmt->data); MemoryContextSwitchTo(oldcontext); pglogical_copySendData(pglcstate, BinarySignature, sizeof(BinarySignature)); pglogical_copySendInt32(pglcstate, 0); pglogical_copySendInt32(pglcstate, 0); } static void pglogical_proccess_copy(pglogical_copyState *pglcstate) { uint64 processed; FILE *save_stdin; if (!pglcstate->copy_parsetree || !pglcstate->copy_buffered_tuples) return; /* * First send a file trailer so that when DoCopy is run below, it sees an * end of the file marker and terminates COPY once all queued tuples are * processed. We also close the file descriptor because DoCopy expects to * see a real EOF too */ pglogical_copySendInt16(pglcstate, -1); /* Also ensure that the data is flushed to the stream */ pglogical_copySendEndOfRow(pglcstate); /* * Now close the write end of the pipe so that DoCopy sees end of the * stream. * * XXX This is really sad because ideally we would have liked to keep the * pipe open and use that for next batch of bulk copy. But given the way * COPY protocol currently works, we don't have any other option but to * close the stream. */ fflush(pglcstate->copy_write_file); fclose(pglcstate->copy_write_file); pglcstate->copy_write_file = NULL; /* * The COPY statement previously crafted will read from STDIN. 
So we * override the 'stdin' stream to point to the read end of the pipe created * for this relation. Before that we save the current 'stdin' stream and * restore it back when the COPY is done */ save_stdin = stdin; stdin = pglcstate->copy_read_file; /* COPY may call into SPI (triggers, ...) and we already are in SPI. */ SPI_push(); /* Initiate the actual COPY */ #if PG_VERSION_NUM >= 100000 PGLDoCopy((CopyStmt*)((RawStmt *)linitial(pglcstate->copy_parsetree))->stmt, pglcstate->copy_stmt->data, &processed); #else PGLDoCopy((CopyStmt *) linitial(pglcstate->copy_parsetree), pglcstate->copy_stmt->data, &processed); #endif /* Clean up SPI state */ SPI_pop(); fclose(pglcstate->copy_read_file); pglcstate->copy_read_file = NULL; stdin = save_stdin; /* Ensure we processed correct number of tuples */ Assert(processed == pglcstate->copy_buffered_tuples); list_free_deep(pglcstate->copy_parsetree); pglcstate->copy_parsetree = NIL; pglcstate->copy_buffered_tuples = 0; pglcstate->copy_buffered_size = 0; CommandCounterIncrement(); } void pglogical_apply_spi_mi_finish(PGLogicalRelation *rel) { if (!pglcstate) return; Assert(pglcstate->rel == rel); pglogical_proccess_copy(pglcstate); if (pglcstate->copy_stmt) { pfree(pglcstate->copy_stmt->data); pfree(pglcstate->copy_stmt); } if (pglcstate->attnumlist) list_free(pglcstate->attnumlist); if (pglcstate->copy_file != -1) FileClose(pglcstate->copy_file); if (pglcstate->copy_write_file) fclose(pglcstate->copy_write_file); if (pglcstate->copy_read_file) fclose(pglcstate->copy_read_file); if (pglcstate->msgbuf) { pfree(pglcstate->msgbuf->data); pfree(pglcstate->msgbuf); } if (pglcstate->rowcontext) { MemoryContextDelete(pglcstate->rowcontext); pglcstate->rowcontext = NULL; } pfree(pglcstate); pglcstate = NULL; } /* * pglogical_copySendInt32 sends an int32 in network byte order */ static void pglogical_copySendInt32(pglogical_copyState *pglcstate, int32 val) { uint32 buf; buf = htonl((uint32) val); pglogical_copySendData(pglcstate, &buf, 
sizeof(buf)); } /* * pglogical_copySendInt16 sends an int16 in network byte order */ static void pglogical_copySendInt16(pglogical_copyState *pglcstate, int16 val) { uint16 buf; buf = htons((uint16) val); pglogical_copySendData(pglcstate, &buf, sizeof(buf)); } /*---------- * pglogical_copySendData sends output data to the destination (file or frontend) * pglogical_copySendEndOfRow does the appropriate thing at end of each data row * (data is not actually flushed except by pglogical_copySendEndOfRow) * * NB: no data conversion is applied by these functions *---------- */ static void pglogical_copySendData(pglogical_copyState *pglcstate, const void *databuf, int datasize) { appendBinaryStringInfo(pglcstate->msgbuf, databuf, datasize); } static void pglogical_copySendEndOfRow(pglogical_copyState *pglcstate) { StringInfo msgbuf = pglcstate->msgbuf; if (fwrite(msgbuf->data, msgbuf->len, 1, pglcstate->copy_write_file) != 1 || ferror(pglcstate->copy_write_file)) { ereport(ERROR, (errcode_for_file_access(), errmsg("could not write to COPY file: %m"))); } resetStringInfo(msgbuf); } /* * Emit one row during CopyTo(). 
*/ static void pglogical_copyOneRowTo(pglogical_copyState *pglcstate, Datum *values, bool *nulls) { FmgrInfo *out_functions = pglcstate->out_functions; MemoryContext oldcontext; ListCell *cur; MemoryContextReset(pglcstate->rowcontext); oldcontext = MemoryContextSwitchTo(pglcstate->rowcontext); /* Binary per-tuple header */ pglogical_copySendInt16(pglcstate, list_length(pglcstate->attnumlist)); foreach(cur, pglcstate->attnumlist) { int attnum = lfirst_int(cur); Datum value = values[attnum]; bool isnull = nulls[attnum]; if (isnull) pglogical_copySendInt32(pglcstate, -1); else { bytea *outputbytes; outputbytes = SendFunctionCall(&out_functions[attnum], value); pglogical_copySendInt32(pglcstate, VARSIZE(outputbytes) - VARHDRSZ); pglogical_copySendData(pglcstate, VARDATA(outputbytes), VARSIZE(outputbytes) - VARHDRSZ); } } pglcstate->copy_buffered_tuples++; pglcstate->copy_buffered_size += pglcstate->msgbuf->len; pglogical_copySendEndOfRow(pglcstate); MemoryContextSwitchTo(oldcontext); } #else /* WIN32 */ bool pglogical_apply_spi_can_mi(PGLogicalRelation *rel) { return false; } void pglogical_apply_spi_mi_add_tuple(PGLogicalRelation *rel, PGLogicalTupleData *tup) { elog(ERROR, "pglogical_apply_spi_mi_add_tuple called unexpectedly"); } void pglogical_apply_spi_mi_finish(PGLogicalRelation *rel) { elog(ERROR, "pglogical_apply_spi_mi_finish called unexpectedly"); } #endif /* WIN32 */ pglogical-REL2_4_1/pglogical_apply_spi.h000066400000000000000000000022421415142317000203540ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_apply_spi.h * pglogical apply functions using SPI * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_apply_spi.h * *------------------------------------------------------------------------- */ #ifndef PGLOGICAL_APPLY_SPI_H #define PGLOGICAL_APPLY_SPI_H #include "pglogical_relcache.h" #include "pglogical_proto_native.h" extern void 
pglogical_apply_spi_begin(void); extern void pglogical_apply_spi_commit(void); extern void pglogical_apply_spi_insert(PGLogicalRelation *rel, PGLogicalTupleData *newtup); extern void pglogical_apply_spi_update(PGLogicalRelation *rel, PGLogicalTupleData *oldtup, PGLogicalTupleData *newtup); extern void pglogical_apply_spi_delete(PGLogicalRelation *rel, PGLogicalTupleData *oldtup); extern bool pglogical_apply_spi_can_mi(PGLogicalRelation *rel); extern void pglogical_apply_spi_mi_add_tuple(PGLogicalRelation *rel, PGLogicalTupleData *tup); extern void pglogical_apply_spi_mi_finish(PGLogicalRelation *rel); #endif /* PGLOGICAL_APPLY_SPI_H */ pglogical-REL2_4_1/pglogical_conflict.c000066400000000000000000000571531415142317000201630ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_conflict.c * Functions for detecting and handling conflicts * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_conflict.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "libpq-fe.h" #include "miscadmin.h" #include "access/commit_ts.h" #include "access/heapam.h" #include "access/htup_details.h" #include "access/transam.h" #include "access/xact.h" #include "catalog/pg_type.h" #include "executor/executor.h" #include "parser/parse_relation.h" #include "replication/origin.h" #include "replication/reorderbuffer.h" #include "storage/bufmgr.h" #include "storage/lmgr.h" #include "utils/builtins.h" #include "utils/datetime.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/snapmgr.h" #include "utils/syscache.h" #if PG_VERSION_NUM < 120000 #include "utils/tqual.h" #endif #include "utils/typcache.h" #include "pglogical_conflict.h" #include "pglogical_proto_native.h" int pglogical_conflict_resolver = PGLOGICAL_RESOLVE_APPLY_REMOTE; int pglogical_conflict_log_level = LOG; static void tuple_to_stringinfo(StringInfo s, 
TupleDesc tupdesc, HeapTuple tuple); /* * Setup a ScanKey for a search in the relation 'rel' for a tuple 'key' that * is setup to match 'rel' (*NOT* idxrel!). * * Returns whether any column in the passed tuple contains a NULL for an * indexed field. */ static bool build_index_scan_key(ScanKey skey, Relation rel, Relation idxrel, PGLogicalTupleData *tup) { int attoff; Datum indclassDatum; Datum indkeyDatum; bool isnull; oidvector *opclass; int2vector *indkey; bool hasnulls = false; indclassDatum = SysCacheGetAttr(INDEXRELID, idxrel->rd_indextuple, Anum_pg_index_indclass, &isnull); Assert(!isnull); opclass = (oidvector *) DatumGetPointer(indclassDatum); indkeyDatum = SysCacheGetAttr(INDEXRELID, idxrel->rd_indextuple, Anum_pg_index_indkey, &isnull); Assert(!isnull); indkey = (int2vector *) DatumGetPointer(indkeyDatum); /* * Examine each indexed attribute to ensure the passed tuple's matching * value isn't NULL and we have an equality operator for it. */ for (attoff = 0; attoff < IndexRelationGetNumberOfKeyAttributes(idxrel); attoff++) { Oid operator; Oid opfamily; RegProcedure regop; int pkattno = attoff + 1; int mainattno = indkey->values[attoff]; Oid atttype = attnumTypeId(rel, mainattno); Oid optype = get_opclass_input_type(opclass->values[attoff]); opfamily = get_opclass_family(opclass->values[attoff]); operator = get_opfamily_member(opfamily, optype, optype, BTEqualStrategyNumber); if (!OidIsValid(operator)) elog(ERROR, "could not lookup equality operator for type %u, optype %u in opfamily %u", atttype, optype, opfamily); regop = get_opcode(operator); /* FIXME: convert type? */ ScanKeyInit(&skey[attoff], pkattno, BTEqualStrategyNumber, regop, tup->values[mainattno - 1]); skey[attoff].sk_collation = idxrel->rd_indcollation[attoff]; if (tup->nulls[mainattno - 1]) { hasnulls = true; skey[attoff].sk_flags |= SK_ISNULL; } } return hasnulls; } /* * Search the index 'idxrel' for a tuple identified by 'skey' in 'rel'. 
* * If a matching tuple is found lock it with lockmode, fill the slot with its * contents and return true, false is returned otherwise. */ static bool find_index_tuple(ScanKey skey, Relation rel, Relation idxrel, LockTupleMode lockmode, TupleTableSlot *slot) { #if PG_VERSION_NUM < 120000 HeapTuple scantuple; #endif bool found; IndexScanDesc scan; SnapshotData snap; TransactionId xwait; /* * We need SnapshotDirty because we're doing uniqueness lookups that must * consider rows added/updated by concurrent transactions, just like a * normal UNIQUE check does. */ InitDirtySnapshot(snap); scan = index_beginscan(rel, idxrel, &snap, IndexRelationGetNumberOfKeyAttributes(idxrel), 0); retry: found = false; index_rescan(scan, skey, IndexRelationGetNumberOfKeyAttributes(idxrel), NULL, 0); #if PG_VERSION_NUM >= 120000 if (index_getnext_slot(scan, ForwardScanDirection, slot)) #else if ((scantuple = index_getnext(scan, ForwardScanDirection)) != NULL) #endif { found = true; #if PG_VERSION_NUM < 120000 ExecStoreTuple(scantuple, slot, InvalidBuffer, false); #endif ExecMaterializeSlot(slot); /* * Did any concurrent txn affect the tuple? (See * HeapTupleSatisfiesDirty for how we get this). */ xwait = TransactionIdIsValid(snap.xmin) ? 
snap.xmin : snap.xmax; if (TransactionIdIsValid(xwait)) { /* Wait for the specified transaction to commit or abort */ XactLockTableWait(xwait, NULL, NULL, XLTW_None); goto retry; } } /* Matching tuple found, no concurrent txns modifying it */ if (found) { #if PG_VERSION_NUM >= 120000 TM_FailureData tmfd; TM_Result res; #else Buffer buf; HeapUpdateFailureData hufd; HTSU_Result res; HeapTupleData locktup; ItemPointerCopy(&slot->tts_tuple->t_self, &locktup.t_self); #endif PushActiveSnapshot(GetLatestSnapshot()); #if PG_VERSION_NUM >= 120000 res = table_tuple_lock(rel, &(slot->tts_tid), GetLatestSnapshot(), slot, GetCurrentCommandId(false), lockmode, LockWaitBlock, 0 /* don't follow updates */ , &tmfd); #else res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false), lockmode, false /* wait */, false /* don't follow updates */, &buf, &hufd); /* the tuple slot already has the buffer pinned */ ReleaseBuffer(buf); #endif PopActiveSnapshot(); switch (res) { #if PG_VERSION_NUM >= 120000 case TM_Ok: #else case HeapTupleMayBeUpdated: #endif /* lock was successfully acquired */ break; #if PG_VERSION_NUM >= 120000 case TM_Updated: #else case HeapTupleUpdated: #endif /* * We lost a race between when we looked up the tuple and * checked for concurrent modifying txns and when we tried to * lock the matched tuple. * * XXX: Improve handling here. */ ereport(LOG, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("concurrent update, retrying"))); goto retry; default: elog(ERROR, "unexpected HTSU_Result after locking: %u", res); break; } } index_endscan(scan); return found; } /* * Find tuple using REPLICA IDENTITY index and output it in 'oldslot' * if found. * * The index oid is also output. 
*/ bool pglogical_tuple_find_replidx(ResultRelInfo *relinfo, PGLogicalTupleData *tuple, TupleTableSlot *oldslot, Oid *idxrelid) { Oid idxoid; Relation idxrel; ScanKeyData index_key[INDEX_MAX_KEYS]; bool found; /* Open REPLICA IDENTITY index.*/ idxoid = RelationGetReplicaIndex(relinfo->ri_RelationDesc); if (!OidIsValid(idxoid)) { ereport(ERROR, (errmsg("could not find REPLICA IDENTITY index for table %s with oid %u", get_rel_name(RelationGetRelid(relinfo->ri_RelationDesc)), RelationGetRelid(relinfo->ri_RelationDesc)), errhint("The REPLICA IDENTITY index is usually the PRIMARY KEY. See the PostgreSQL docs for ALTER TABLE ... REPLICA IDENTITY"))); } *idxrelid = idxoid; idxrel = index_open(idxoid, RowExclusiveLock); /* Build scan key for just opened index*/ build_index_scan_key(index_key, relinfo->ri_RelationDesc, idxrel, tuple); /* Try to find the row and store any matching row in 'oldslot'. */ found = find_index_tuple(index_key, relinfo->ri_RelationDesc, idxrel, LockTupleExclusive, oldslot); /* Don't release lock until commit. */ index_close(idxrel, NoLock); return found; } /* * Find the tuple in a table using any index and returns the conflicting * index's oid, if any conflict found. * * This is not wholly safe. It does not consider the table's upstream replica * identity, and may choose to resolve the conflict on a unique index that * isn't part of the replica identity. * * Order-of-apply issues between multiple upstreams can lead to * non-deterministic behaviour in cases where we resolve one conflict using one * index, then a second conflict using a different index. * * We should really respect the replica identity more (i.e. use * pglogical_tuple_find_replidx). Or at least raise a WARNING that an * inconsistency may arise. 
*/ Oid pglogical_tuple_find_conflict(ResultRelInfo *relinfo, PGLogicalTupleData *tuple, TupleTableSlot *outslot) { Oid conflict_idx = InvalidOid; ScanKeyData index_key[INDEX_MAX_KEYS]; int i; ItemPointerData conflicting_tid; Oid replidxoid; bool found = false; ItemPointerSetInvalid(&conflicting_tid); /* * Check the replica identity index with a SnapshotDirty scan first, like * pglogical_tuple_find_replidx, but without ERRORing if we don't find * a replica identity index. */ replidxoid = RelationGetReplicaIndex(relinfo->ri_RelationDesc); if (OidIsValid(replidxoid)) { ScanKeyData index_key[INDEX_MAX_KEYS]; Relation idxrel = index_open(replidxoid, RowExclusiveLock); build_index_scan_key(index_key, relinfo->ri_RelationDesc, idxrel, tuple); found = find_index_tuple(index_key, relinfo->ri_RelationDesc, idxrel, LockTupleExclusive, outslot); index_close(idxrel, NoLock); if (found) return replidxoid; } /* * Do a SnapshotDirty search for conflicting tuples. If any is found * store it in outslot and return the oid of the matching index. We * don't continue scanning for matches in other indexes, so we won't * notice if the tuple conflicts with another index, and it'll * raise a unique violation on apply instead. * * We could carry on here even if (found) and look for secondary conflicts, * but all we'd be able to do would be ERROR here instead of later. The * rest of the time we'd just pay a useless performance cost for extra * index scans. */ for (i = 0; i < relinfo->ri_NumIndices; i++) { IndexInfo *ii = relinfo->ri_IndexRelationInfo[i]; Relation idxrel; /* * Only unique indexes are of interest here, and we can't deal with * expression indexes so far. */ if (!ii->ii_Unique || ii->ii_Expressions != NIL) continue; /* * TODO: predicates should be handled better. There's no point scanning * an index where the predicates show it could never match anyway, and * it can produce false conflicts if the predicate includes non-indexed * columns. 
We could find a local tuple that matches the predicate in * the index, but there's only a true conflict if the remote tuple also * matches the predicate. If we ignore the predicate we generate a false * conflict. See RM#1839. * * For now we reject conflict resolution on indexes with predicates * entirely. If there's a conflict it'll be raised on apply with a * unique violation. */ if (ii->ii_Predicate != NIL) continue; idxrel = relinfo->ri_IndexRelationDescs[i]; /* No point re-scanning the replica identity index */ if (RelationGetRelid(idxrel) == replidxoid) continue; if (build_index_scan_key(index_key, relinfo->ri_RelationDesc, idxrel, tuple)) continue; /* Try to find conflicting row and store in 'outslot' */ found = find_index_tuple(index_key, relinfo->ri_RelationDesc, idxrel, LockTupleExclusive, outslot); if (found) { #if PG_VERSION_NUM >= 120000 ItemPointerCopy(&outslot->tts_tid, &conflicting_tid); #else ItemPointerCopy(&outslot->tts_tuple->t_self, &conflicting_tid); #endif conflict_idx = RelationGetRelid(idxrel); break; } CHECK_FOR_INTERRUPTS(); } return conflict_idx; } /* * Resolve conflict based on commit timestamp. */ static bool conflict_resolve_by_timestamp(RepOriginId local_origin_id, RepOriginId remote_origin_id, TimestampTz local_ts, TimestampTz remote_ts, bool last_update_wins, PGLogicalConflictResolution *resolution) { int cmp; cmp = timestamptz_cmp_internal(remote_ts, local_ts); /* * The logic bellow assumes last update wins, we invert the logic by * inverting result of timestamp comparison if first update wins was * requested. */ if (!last_update_wins) cmp = -cmp; if (cmp > 0) { /* The remote row wins, update the local one. */ *resolution = PGLogicalResolution_ApplyRemote; return true; } else if (cmp < 0) { /* The local row wins, retain it */ *resolution = PGLogicalResolution_KeepLocal; return false; } else { /* * The timestamps were equal, break the tie in a manner that is * consistent across all nodes. 
* * XXX: TODO, for now we just always apply remote change. */ *resolution = PGLogicalResolution_ApplyRemote; return true; } } /* * Get the origin of the local tuple. * * If the track_commit_timestamp is off, we return remote origin info since * there is no way to get any meaningful info locally. This means that * the caller will assume that all the local tuples came from remote site when * track_commit_timestamp is off. * * This function is used by UPDATE conflict detection so the above means that * UPDATEs will not be recognized as conflict even if they change locally * modified row. * * Returns true if local origin data was found, false if not. */ bool get_tuple_origin(HeapTuple local_tuple, TransactionId *xmin, RepOriginId *local_origin, TimestampTz *local_ts) { *xmin = HeapTupleHeaderGetXmin(local_tuple->t_data); if (!track_commit_timestamp) { *local_origin = replorigin_session_origin; *local_ts = replorigin_session_origin_timestamp; return false; } else { if (TransactionIdIsValid(*xmin) && !TransactionIdIsNormal(*xmin)) { /* * Pg emits an ERROR if you try to pass FrozenTransactionId (2) * or BootstrapTransactionId (1) to TransactionIdGetCommitTsData, * per RT#46983 . This seems like an oversight in the core function, * but we can work around it here by setting it to the same thing * we'd get if the xid's commit timestamp was trimmed already. */ *local_origin = InvalidRepOriginId; *local_ts = 0; return false; } else return TransactionIdGetCommitTsData(*xmin, local_ts, local_origin); } } /* * Try resolving the conflict resolution. * * Returns true when remote tuple should be applied. 
*/ bool try_resolve_conflict(Relation rel, HeapTuple localtuple, HeapTuple remotetuple, HeapTuple *resulttuple, PGLogicalConflictResolution *resolution) { TransactionId xmin; TimestampTz local_ts; RepOriginId local_origin; bool apply = false; switch (pglogical_conflict_resolver) { case PGLOGICAL_RESOLVE_ERROR: /* TODO: proper error message */ elog(ERROR, "cannot apply conflicting row"); break; case PGLOGICAL_RESOLVE_APPLY_REMOTE: apply = true; *resolution = PGLogicalResolution_ApplyRemote; break; case PGLOGICAL_RESOLVE_KEEP_LOCAL: apply = false; *resolution = PGLogicalResolution_KeepLocal; break; case PGLOGICAL_RESOLVE_LAST_UPDATE_WINS: get_tuple_origin(localtuple, &xmin, &local_origin, &local_ts); apply = conflict_resolve_by_timestamp(local_origin, replorigin_session_origin, local_ts, replorigin_session_origin_timestamp, true, resolution); break; case PGLOGICAL_RESOLVE_FIRST_UPDATE_WINS: get_tuple_origin(localtuple, &xmin, &local_origin, &local_ts); apply = conflict_resolve_by_timestamp(local_origin, replorigin_session_origin, local_ts, replorigin_session_origin_timestamp, false, resolution); break; default: elog(ERROR, "unrecognized pglogical_conflict_resolver setting %d", pglogical_conflict_resolver); } if (apply) *resulttuple = remotetuple; else *resulttuple = localtuple; return apply; } #if 0 static char * conflict_type_to_string(PGLogicalConflictType conflict_type) { switch (conflict_type) { case CONFLICT_INSERT_INSERT: return "insert_insert"; case CONFLICT_UPDATE_UPDATE: return "update_update"; case CONFLICT_UPDATE_DELETE: return "update_delete"; case CONFLICT_DELETE_DELETE: return "delete_delete"; } /* Unreachable */ return NULL; } #endif static char * conflict_resolution_to_string(PGLogicalConflictResolution resolution) { switch (resolution) { case PGLogicalResolution_ApplyRemote: return "apply_remote"; case PGLogicalResolution_KeepLocal: return "keep_local"; case PGLogicalResolution_Skip: return "skip"; } /* Unreachable */ return NULL; } /* * Log the 
conflict to server log. * * There are number of tuples passed: * * - The local tuple we conflict with or NULL if not found [localtuple]; * * - If the remote tuple was an update, the key of the old tuple * as a PGLogicalTuple [oldkey] * * - The remote tuple, after we fill any defaults and apply any local * BEFORE triggers but before conflict resolution [remotetuple]; * * - The tuple we'll actually apply if any, after conflict resolution * [applytuple] * * The PGLogicalRelation's name info is for the remote rel. If we add relation * mapping we'll need to get the name/namespace of the local relation too. * * This runs in MessageContext so we don't have to worry about leaks, but * we still try to free the big chunks as we go. */ void pglogical_report_conflict(PGLogicalConflictType conflict_type, PGLogicalRelation *rel, HeapTuple localtuple, PGLogicalTupleData *oldkey, HeapTuple remotetuple, HeapTuple applytuple, PGLogicalConflictResolution resolution, TransactionId local_tuple_xid, bool found_local_origin, RepOriginId local_tuple_origin, TimestampTz local_tuple_commit_ts, Oid conflict_idx_oid, bool has_before_triggers) { char local_tup_ts_str[MAXDATELEN] = "(unset)"; StringInfoData localtup, remotetup; TupleDesc desc = RelationGetDescr(rel->rel); const char *idxname = "(unknown)"; const char *qualrelname; memset(local_tup_ts_str, 0, MAXDATELEN); if (found_local_origin) strcpy(local_tup_ts_str, timestamptz_to_str(local_tuple_commit_ts)); initStringInfo(&remotetup); tuple_to_stringinfo(&remotetup, desc, remotetuple); if (localtuple != NULL) { initStringInfo(&localtup); tuple_to_stringinfo(&localtup, desc, localtuple); } if (OidIsValid(conflict_idx_oid)) idxname = get_rel_name(conflict_idx_oid); qualrelname = quote_qualified_identifier( get_namespace_name(RelationGetNamespace(rel->rel)), RelationGetRelationName(rel->rel)); /* * We try to provide a lot of information about conflicting tuples because * the conflicts are often transient and timing-sensitive. 
It's rare that * we can examine a stopped system or reproduce them at leisure. So the * more info we have in the logs, the better chance we have of diagnosing * application issues. It's worth paying the price of some log spam. * * This deliberately somewhat overlaps with the context info we log with * log_error_verbosity=verbose because we don't necessarily have all that * info enabled. */ switch (conflict_type) { case CONFLICT_INSERT_INSERT: case CONFLICT_UPDATE_UPDATE: ereport(pglogical_conflict_log_level, (errcode(ERRCODE_INTEGRITY_CONSTRAINT_VIOLATION), errmsg("CONFLICT: remote %s on relation %s (local index %s). Resolution: %s.", conflict_type == CONFLICT_INSERT_INSERT ? "INSERT" : "UPDATE", qualrelname, idxname, conflict_resolution_to_string(resolution)), errdetail("existing local tuple {%s} xid=%u,origin=%d,timestamp=%s; remote tuple {%s}%s in xact origin=%u,timestamp=%s,commit_lsn=%X/%X", localtup.data, local_tuple_xid, found_local_origin ? (int)local_tuple_origin : -1, local_tup_ts_str, remotetup.data, has_before_triggers ? "*":"", replorigin_session_origin, timestamptz_to_str(replorigin_session_origin_timestamp), (uint32)(replorigin_session_origin_lsn<<32), (uint32)replorigin_session_origin_lsn))); break; case CONFLICT_UPDATE_DELETE: case CONFLICT_DELETE_DELETE: ereport(pglogical_conflict_log_level, (errcode(ERRCODE_INTEGRITY_CONSTRAINT_VIOLATION), errmsg("CONFLICT: remote %s on relation %s replica identity index %s (tuple not found). Resolution: %s.", conflict_type == CONFLICT_UPDATE_DELETE ? "UPDATE" : "DELETE", qualrelname, idxname, conflict_resolution_to_string(resolution)), errdetail("remote tuple {%s}%s in xact origin=%u,timestamp=%s,commit_lsn=%X/%X", remotetup.data, has_before_triggers ? 
"*":"", replorigin_session_origin, timestamptz_to_str(replorigin_session_origin_timestamp), (uint32)(replorigin_session_origin_lsn<<32), (uint32)replorigin_session_origin_lsn))); break; } } /* Checks validity of pglogical_conflict_resolver GUC */ bool pglogical_conflict_resolver_check_hook(int *newval, void **extra, GucSource source) { /* * Only allow PGLOGICAL_RESOLVE_APPLY_REMOTE when track_commit_timestamp * is off, because there is no way to know where the local tuple * originated from. */ if (!track_commit_timestamp && *newval != PGLOGICAL_RESOLVE_APPLY_REMOTE && *newval != PGLOGICAL_RESOLVE_ERROR) { GUC_check_errdetail("track_commit_timestamp is off"); return false; } return true; } /* * print the tuple 'tuple' into the StringInfo s * * (Based on bdr2) */ static void tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple) { int natt; bool first = true; static const int MAX_CONFLICT_LOG_ATTR_LEN = 40; /* print all columns individually */ for (natt = 0; natt < tupdesc->natts; natt++) { Form_pg_attribute attr; /* the attribute itself */ Oid typid; /* type of current attribute */ HeapTuple type_tuple; /* information about a type */ Form_pg_type type_form; Oid typoutput; /* output function */ bool typisvarlena; Datum origval; /* possibly toasted Datum */ Datum val = PointerGetDatum(NULL); /* definitely detoasted Datum */ char *outputstr = NULL; bool isnull; /* column is null? 
*/ attr = TupleDescAttr(tupdesc, natt); /* * don't print dropped columns, we can't be sure everything is * available for them */ if (attr->attisdropped) continue; /* * Don't print system columns */ if (attr->attnum < 0) continue; typid = attr->atttypid; /* gather type name */ type_tuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (!HeapTupleIsValid(type_tuple)) elog(ERROR, "cache lookup failed for type %u", typid); type_form = (Form_pg_type) GETSTRUCT(type_tuple); /* print attribute name */ if (first) first = false; else appendStringInfoChar(s, ' '); appendStringInfoString(s, NameStr(attr->attname)); /* print attribute type */ appendStringInfoChar(s, '['); appendStringInfoString(s, NameStr(type_form->typname)); appendStringInfoChar(s, ']'); /* query output function */ getTypeOutputInfo(typid, &typoutput, &typisvarlena); ReleaseSysCache(type_tuple); /* get Datum from tuple */ origval = heap_getattr(tuple, natt + 1, tupdesc, &isnull); if (isnull) outputstr = "(null)"; else if (typisvarlena && VARATT_IS_EXTERNAL_ONDISK(origval)) outputstr = "(unchanged-toast-datum)"; else if (typisvarlena) val = PointerGetDatum(PG_DETOAST_DATUM(origval)); else val = origval; /* print data */ if (outputstr == NULL) outputstr = OidOutputFunctionCall(typoutput, val); /* * Abbreviate the Datum if it's too long. This may make it syntatically * invalid, but it's not like we're writing out a valid ROW(...) as it * is. 
*/ if (strlen(outputstr) > MAX_CONFLICT_LOG_ATTR_LEN) { /* The null written at the end of strcpy will truncate the string */ strcpy(&outputstr[MAX_CONFLICT_LOG_ATTR_LEN-5], "..."); } appendStringInfoChar(s, ':'); appendStringInfoString(s, outputstr); } } pglogical-REL2_4_1/pglogical_conflict.h000066400000000000000000000045471415142317000201670ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_conflict.h * pglogical conflict detection and resolution * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_conflict.h * *------------------------------------------------------------------------- */ #ifndef PGLOGICAL_CONGLICT_H #define PGLOGICAL_CONGLICT_H #include "nodes/execnodes.h" #include "replication/origin.h" #include "utils/guc.h" #include "pglogical_proto_native.h" typedef enum PGLogicalConflictResolution { PGLogicalResolution_ApplyRemote, PGLogicalResolution_KeepLocal, PGLogicalResolution_Skip } PGLogicalConflictResolution; typedef enum { PGLOGICAL_RESOLVE_ERROR, PGLOGICAL_RESOLVE_APPLY_REMOTE, PGLOGICAL_RESOLVE_KEEP_LOCAL, PGLOGICAL_RESOLVE_LAST_UPDATE_WINS, PGLOGICAL_RESOLVE_FIRST_UPDATE_WINS } PGLogicalResolveOption; extern int pglogical_conflict_resolver; extern int pglogical_conflict_log_level; typedef enum PGLogicalConflictType { CONFLICT_INSERT_INSERT, CONFLICT_UPDATE_UPDATE, CONFLICT_UPDATE_DELETE, CONFLICT_DELETE_DELETE } PGLogicalConflictType; extern bool pglogical_tuple_find_replidx(ResultRelInfo *relinfo, PGLogicalTupleData *tuple, TupleTableSlot *oldslot, Oid *idxrelid); extern Oid pglogical_tuple_find_conflict(ResultRelInfo *relinfo, PGLogicalTupleData *tuple, TupleTableSlot *oldslot); extern bool get_tuple_origin(HeapTuple local_tuple, TransactionId *xmin, RepOriginId *local_origin, TimestampTz *local_ts); extern bool try_resolve_conflict(Relation rel, HeapTuple localtuple, HeapTuple remotetuple, HeapTuple *resulttuple, 
PGLogicalConflictResolution *resolution); extern void pglogical_report_conflict(PGLogicalConflictType conflict_type, PGLogicalRelation *rel, HeapTuple localtuple, PGLogicalTupleData *oldkey, HeapTuple remotetuple, HeapTuple applytuple, PGLogicalConflictResolution resolution, TransactionId local_tuple_xid, bool found_local_origin, RepOriginId local_tuple_origin, TimestampTz local_tuple_timestamp, Oid conflict_idx_id, bool has_before_triggers); extern bool pglogical_conflict_resolver_check_hook(int *newval, void **extra, GucSource source); #endif /* PGLOGICAL_CONGLICT_H */ pglogical-REL2_4_1/pglogical_create_subscriber.c000066400000000000000000001336421415142317000220460ustar00rootroot00000000000000/* ------------------------------------------------------------------------- * * pglogical_create_subscriber.c * Initialize a new pglogical subscriber from a physical base backup * * Copyright (C) 2012-2016, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_create_subscriber.c * * ------------------------------------------------------------------------- */ /* dirent.h on port/win32_msvc expects MAX_PATH to be defined */ #if defined(_WIN32) #define WIN32_LEAN_AND_MEAN #include #endif #include #include #include #include #include #include #include #include #include #include /* Note the order is important for debian here. 
*/ #if !defined(pg_attribute_printf) /* GCC and XLC support format attributes */ #if defined(__GNUC__) || defined(__IBMC__) #define pg_attribute_format_arg(a) __attribute__((format_arg(a))) #define pg_attribute_printf(f,a) __attribute__((format(PG_PRINTF_ATTRIBUTE, f, a))) #else #define pg_attribute_format_arg(a) #define pg_attribute_printf(f,a) #endif #endif #include "libpq-fe.h" #include "postgres_fe.h" #include "pqexpbuffer.h" #include "getopt_long.h" #include "miscadmin.h" #include "access/timeline.h" #include "access/xlog_internal.h" #include "catalog/pg_control.h" #include "pglogical_fe.h" #define MAX_APPLY_DELAY 86400 typedef struct RemoteInfo { Oid nodeid; char *node_name; char *sysid; char *dbname; char *replication_sets; } RemoteInfo; typedef enum { VERBOSITY_NORMAL, VERBOSITY_VERBOSE, VERBOSITY_DEBUG } VerbosityLevelEnum; static char *argv0 = NULL; static const char *progname; static char *data_dir = NULL; static char pid_file[MAXPGPATH]; static time_t start_time; static VerbosityLevelEnum verbosity = VERBOSITY_NORMAL; /* defined as static so that die() can close them */ static PGconn *subscriber_conn = NULL; static PGconn *provider_conn = NULL; static void signal_handler(int sig); static void usage(void); static void die(const char *fmt,...) pg_attribute_printf(1, 2); static void print_msg(VerbosityLevelEnum level, const char *fmt,...) 
pg_attribute_printf(2, 3); static int run_pg_ctl(const char *arg); static void run_basebackup(const char *provider_connstr, const char *data_dir, const char *extra_basebackup_args); static void wait_postmaster_connection(const char *connstr); static void wait_primary_connection(const char *connstr); static void wait_postmaster_shutdown(void); static char *validate_replication_set_input(char *replication_sets); static void remove_unwanted_data(PGconn *conn); static void initialize_replication_origin(PGconn *conn, char *origin_name, char *remote_lsn); static char *create_restore_point(PGconn *conn, char *restore_point_name); static char *initialize_replication_slot(PGconn *conn, char *dbname, char *provider_node_name, char *subscription_name, bool drop_slot_if_exists); static void pglogical_subscribe(PGconn *conn, char *subscriber_name, char *subscriber_dsn, char *provider_connstr, char *replication_sets, int apply_delay, bool force_text_transfer); static RemoteInfo *get_remote_info(PGconn* conn); static bool extension_exists(PGconn *conn, const char *extname); static void install_extension(PGconn *conn, const char *extname); static void initialize_data_dir(char *data_dir, char *connstr, char *postgresql_conf, char *pg_hba_conf, char *extra_basebackup_args); static bool check_data_dir(char *data_dir, RemoteInfo *remoteinfo); static char *read_sysid(const char *data_dir); static void WriteRecoveryConf(PQExpBuffer contents); static void CopyConfFile(char *fromfile, char *tofile, bool append); static char *get_connstr_dbname(char *connstr); static char *get_connstr(char *connstr, char *dbname); static char *PQconninfoParamsToConnstr(const char *const * keywords, const char *const * values); static void appendPQExpBufferConnstrValue(PQExpBuffer buf, const char *str); static bool file_exists(const char *path); static bool is_pg_dir(const char *path); static void copy_file(char *fromfile, char *tofile, bool append); static char *find_other_exec_or_die(const char *argv0, 
const char *target); static bool postmaster_is_alive(pid_t pid); static long get_pgpid(void); static char **get_database_list(char *databases, int *n_databases); static char *generate_restore_point_name(void); static PGconn * connectdb(const char *connstr) { PGconn *conn; conn = PQconnectdb(connstr); if (PQstatus(conn) != CONNECTION_OK) die(_("Connection to database failed: %s, connection string was: %s\n"), PQerrorMessage(conn), connstr); return conn; } void signal_handler(int sig) { if (sig == SIGINT) { die(_("\nCanceling...\n")); } } int main(int argc, char **argv) { int i; int c; PQExpBuffer recoveryconfcontents = createPQExpBuffer(); RemoteInfo *remote_info; char *remote_lsn; bool stop = false; bool drop_slot_if_exists = false; int optindex; char *subscriber_name = NULL; char *base_sub_connstr = NULL; char *base_prov_connstr = NULL; char *replication_sets = NULL; char *databases = NULL; char *postgresql_conf = NULL, *pg_hba_conf = NULL, *recovery_conf = NULL; int apply_delay = 0; bool force_text_transfer = false; char **slot_names; char *sub_connstr; char *prov_connstr; char **database_list = { NULL }; int n_databases = 1; int dbnum; bool use_existing_data_dir = false; int pg_ctl_ret, logfd; char *restore_point_name = NULL; char *extra_basebackup_args = NULL; static struct option long_options[] = { {"subscriber-name", required_argument, NULL, 'n'}, {"pgdata", required_argument, NULL, 'D'}, {"provider-dsn", required_argument, NULL, 1}, {"subscriber-dsn", required_argument, NULL, 2}, {"replication-sets", required_argument, NULL, 3}, {"postgresql-conf", required_argument, NULL, 4}, {"hba-conf", required_argument, NULL, 5}, {"recovery-conf", required_argument, NULL, 6}, {"stop", no_argument, NULL, 's'}, {"drop-slot-if-exists", no_argument, NULL, 7}, {"apply-delay", required_argument, NULL, 8}, {"databases", required_argument, NULL, 9}, {"extra-basebackup-args", required_argument, NULL, 10}, {"text-types", no_argument, NULL, 11}, {NULL, 0, NULL, 0} }; argv0 = 
argv[0]; progname = get_progname(argv[0]); start_time = time(NULL); signal(SIGINT, signal_handler); /* check for --help */ if (argc > 1) { for (i = 1; i < argc; i++) { if (strcmp(argv[i], "--help") == 0 || strcmp(argv[i], "-?") == 0) { usage(); exit(0); } } } /* Option parsing and validation */ while ((c = getopt_long(argc, argv, "D:n:sv", long_options, &optindex)) != -1) { switch (c) { case 'D': data_dir = pg_strdup(optarg); break; case 'n': subscriber_name = pg_strdup(optarg); break; case 1: base_prov_connstr = pg_strdup(optarg); break; case 2: base_sub_connstr = pg_strdup(optarg); break; case 3: replication_sets = validate_replication_set_input(pg_strdup(optarg)); break; case 4: { postgresql_conf = pg_strdup(optarg); if (postgresql_conf != NULL && !file_exists(postgresql_conf)) die(_("The specified postgresql.conf file does not exist.")); break; } case 5: { pg_hba_conf = pg_strdup(optarg); if (pg_hba_conf != NULL && !file_exists(pg_hba_conf)) die(_("The specified pg_hba.conf file does not exist.")); break; } case 6: { recovery_conf = pg_strdup(optarg); if (recovery_conf != NULL && !file_exists(recovery_conf)) die(_("The specified recovery configuration file does not exist.")); break; } case 'v': verbosity++; break; case 's': stop = true; break; case 7: drop_slot_if_exists = true; break; case 8: apply_delay = atoi(optarg); break; case 9: databases = pg_strdup(optarg); break; case 10: extra_basebackup_args = pg_strdup(optarg); break; case 11: force_text_transfer = true; break; default: fprintf(stderr, _("Unknown option\n")); fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname); exit(1); } } /* * Sanity checks */ if (data_dir == NULL) { fprintf(stderr, _("No data directory specified\n")); fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname); exit(1); } else if (subscriber_name == NULL) { fprintf(stderr, _("No subscriber name specified\n")); fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname); exit(1); 
} if (!base_prov_connstr || !strlen(base_prov_connstr)) die(_("Provider connection string must be specified.\n")); if (!base_sub_connstr || !strlen(base_sub_connstr)) die(_("Subscriber connection string must be specified.\n")); if (apply_delay < 0) die(_("Apply delay cannot be negative.\n")); if (apply_delay > MAX_APPLY_DELAY) die(_("Apply delay cannot be more than %d.\n"), MAX_APPLY_DELAY); if (!replication_sets || !strlen(replication_sets)) replication_sets = "default,default_insert_only,ddl_sql"; /* Init random numbers used for slot suffixes, etc */ srand(time(NULL)); /* Parse database list or connection string. */ if (databases != NULL) { database_list = get_database_list(databases, &n_databases); } else { char *dbname = get_connstr_dbname(base_prov_connstr); if (!dbname) die(_("Either provider connection string must contain database " "name or --databases option must be specified.\n")); n_databases = 1; database_list = palloc(n_databases * sizeof(char *)); database_list[0] = dbname; } slot_names = palloc(n_databases * sizeof(char *)); /* * Check connection strings for validity before doing anything * expensive. */ for (dbnum = 0; dbnum < n_databases; dbnum++) { char *db = database_list[dbnum]; prov_connstr = get_connstr(base_prov_connstr, db); if (!prov_connstr || !strlen(prov_connstr)) die(_("Provider connection string is not valid.\n")); sub_connstr = get_connstr(base_sub_connstr, db); if (!sub_connstr || !strlen(sub_connstr)) die(_("Subscriber connection string is not valid.\n")); } /* * Create log file where new postgres instance will log to while being * initialized. */ logfd = open("pglogical_create_subscriber_postgres.log", O_CREAT | O_RDWR, S_IRUSR | S_IWUSR); if (logfd == -1) { die(_("Creating pglogical_create_subscriber_postgres.log failed: %s"), strerror(errno)); } /* Safe to close() unchecked, we didn't write */ (void) close(logfd); /* Let's start the real work... 
*/ print_msg(VERBOSITY_NORMAL, _("%s: starting ...\n"), progname); for (dbnum = 0; dbnum < n_databases; dbnum++) { char *db = database_list[dbnum]; prov_connstr = get_connstr(base_prov_connstr, db); if (!prov_connstr || !strlen(prov_connstr)) die(_("Provider connection string is not valid.\n")); /* Read the remote server indetification. */ print_msg(VERBOSITY_NORMAL, _("Getting information for database %s ...\n"), db); provider_conn = connectdb(prov_connstr); remote_info = get_remote_info(provider_conn); /* only need to do this piece once */ if (dbnum == 0) { use_existing_data_dir = check_data_dir(data_dir, remote_info); if (use_existing_data_dir && strcmp(remote_info->sysid, read_sysid(data_dir)) != 0) die(_("Subscriber data directory is not basebackup of remote node.\n")); } /* * Create replication slots on remote node. */ print_msg(VERBOSITY_NORMAL, _("Creating replication slot in database %s ...\n"), db); slot_names[dbnum] = initialize_replication_slot(provider_conn, remote_info->dbname, remote_info->node_name, subscriber_name, drop_slot_if_exists); PQfinish(provider_conn); provider_conn = NULL; } /* * Create basebackup or use existing one */ prov_connstr = get_connstr(base_prov_connstr, database_list[0]); sub_connstr = get_connstr(base_sub_connstr, database_list[0]); initialize_data_dir(data_dir, use_existing_data_dir ? NULL : prov_connstr, postgresql_conf, pg_hba_conf, extra_basebackup_args); snprintf(pid_file, MAXPGPATH, "%s/postmaster.pid", data_dir); restore_point_name = generate_restore_point_name(); print_msg(VERBOSITY_NORMAL, _("Creating restore point \"%s\" on remote node ...\n"), restore_point_name); provider_conn = connectdb(prov_connstr); remote_lsn = create_restore_point(provider_conn, restore_point_name); PQfinish(provider_conn); provider_conn = NULL; /* * Get subscriber db to consistent state (for lsn after slot creation). 
*/ print_msg(VERBOSITY_NORMAL, _("Bringing subscriber node to the restore point ...\n")); if (recovery_conf) { #if PG_VERSION_NUM >= 120000 CopyConfFile(recovery_conf, "postgresql.auto.conf", true); #else CopyConfFile(recovery_conf, "recovery.conf", false); #endif } else { #if PG_VERSION_NUM < 120000 appendPQExpBuffer(recoveryconfcontents, "standby_mode = 'on'\n"); #endif appendPQExpBuffer(recoveryconfcontents, "primary_conninfo = '%s'\n", escape_single_quotes_ascii(prov_connstr)); } appendPQExpBuffer(recoveryconfcontents, "recovery_target_name = '%s'\n", restore_point_name); appendPQExpBuffer(recoveryconfcontents, "recovery_target_inclusive = true\n"); #if PG_VERSION_NUM >= 90500 appendPQExpBuffer(recoveryconfcontents, "recovery_target_action = promote\n"); #else appendPQExpBuffer(recoveryconfcontents, "pause_at_recovery_target = false\n"); #endif WriteRecoveryConf(recoveryconfcontents); free(restore_point_name); restore_point_name = NULL; /* * Start subscriber node with pglogical disabled, and wait until it starts * accepting connections which means it has caught up to the restore point. */ pg_ctl_ret = run_pg_ctl("start -l \"pglogical_create_subscriber_postgres.log\" -o \"-c shared_preload_libraries=''\""); if (pg_ctl_ret != 0) die(_("Postgres startup for restore point catchup failed with %d. See pglogical_create_subscriber_postgres.log."), pg_ctl_ret); wait_primary_connection(sub_connstr); /* * Clean any per-node data that were copied by pg_basebackup. 
*/ print_msg(VERBOSITY_VERBOSE, _("Removing old pglogical configuration ...\n")); for (dbnum = 0; dbnum < n_databases; dbnum++) { char *db = database_list[dbnum]; sub_connstr = get_connstr(base_sub_connstr, db); if (!sub_connstr || !strlen(sub_connstr)) die(_("Subscriber connection string is not valid.\n")); subscriber_conn = connectdb(sub_connstr); remove_unwanted_data(subscriber_conn); PQfinish(subscriber_conn); subscriber_conn = NULL; } /* Stop Postgres so we can reset system id and start it with pglogical loaded. */ pg_ctl_ret = run_pg_ctl("stop"); if (pg_ctl_ret != 0) die(_("Postgres stop after restore point catchup failed with %d. See pglogical_create_subscriber_postgres.log."), pg_ctl_ret); wait_postmaster_shutdown(); /* * Start the node again, now with pglogical active so that we can start the * logical replication. This is final start, so don't log to to special log * file anymore. */ print_msg(VERBOSITY_NORMAL, _("Initializing pglogical on the subscriber node:\n")); pg_ctl_ret = run_pg_ctl("start"); if (pg_ctl_ret != 0) die(_("Postgres restart with pglogical enabled failed with %d."), pg_ctl_ret); wait_postmaster_connection(base_sub_connstr); for (dbnum = 0; dbnum < n_databases; dbnum++) { char *db = database_list[dbnum]; sub_connstr = get_connstr(base_sub_connstr, db); prov_connstr = get_connstr(base_prov_connstr, db); subscriber_conn = connectdb(sub_connstr); /* Create the extension. */ print_msg(VERBOSITY_VERBOSE, _("Creating pglogical extension for database %s...\n"), db); if (PQserverVersion(subscriber_conn) < 90500) install_extension(subscriber_conn, "pglogical_origin"); install_extension(subscriber_conn, "pglogical"); /* * Create the identifier which is setup with the position to which we * already caught up using physical replication. */ print_msg(VERBOSITY_VERBOSE, _("Creating replication origin for database %s...\n"), db); initialize_replication_origin(subscriber_conn, slot_names[dbnum], remote_lsn); /* * And finally add the node to the cluster. 
*/ print_msg(VERBOSITY_NORMAL, _("Creating subscriber %s for database %s...\n"), subscriber_name, db); print_msg(VERBOSITY_VERBOSE, _("Replication sets: %s\n"), replication_sets); pglogical_subscribe(subscriber_conn, subscriber_name, sub_connstr, prov_connstr, replication_sets, apply_delay, force_text_transfer); PQfinish(subscriber_conn); subscriber_conn = NULL; } /* If user does not want the node to be running at the end, stop it. */ if (stop) { print_msg(VERBOSITY_NORMAL, _("Stopping the subscriber node ...\n")); pg_ctl_ret = run_pg_ctl("stop"); if (pg_ctl_ret != 0) die(_("Stopping postgres after successful subscribtion failed with %d."), pg_ctl_ret); wait_postmaster_shutdown(); } print_msg(VERBOSITY_NORMAL, _("All done\n")); return 0; } /* * Print help. */ static void usage(void) { printf(_("%s create new pglogical subscriber from basebackup of provider.\n\n"), progname); printf(_("Usage:\n")); printf(_(" %s [OPTION]...\n"), progname); printf(_("\nGeneral options:\n")); printf(_(" -D, --pgdata=DIRECTORY data directory to be used for new node,\n")); printf(_(" can be either empty/non-existing directory,\n")); printf(_(" or directory populated using\n")); printf(_(" pg_basebackup -X stream command\n")); printf(_(" --databases optional list of databases to replicate\n")); printf(_(" -n, --subscriber-name=NAME name of the newly created subscriber\n")); printf(_(" --subscriber-dsn=CONNSTR connection string to the newly created subscriber\n")); printf(_(" --provider-dsn=CONNSTR connection string to the provider\n")); printf(_(" --replication-sets=SETS comma separated list of replication set names\n")); printf(_(" --apply-delay=DELAY apply delay in seconds (by default 0)\n")); printf(_(" --drop-slot-if-exists drop replication slot of conflicting name\n")); printf(_(" -s, --stop stop the server once the initialization is done\n")); printf(_(" -v increase logging verbosity\n")); printf(_(" --extra-basebackup-args additional arguments to pass to pg_basebackup.\n")); 
printf(_(" Safe options: -T, -c, --xlogdir/--waldir\n")); printf(_("\nConfiguration files override:\n")); printf(_(" --hba-conf path to the new pg_hba.conf\n")); printf(_(" --postgresql-conf path to the new postgresql.conf\n")); printf(_(" --recovery-conf path to the template recovery configuration\n")); } /* * Print error and exit. */ static void die(const char *fmt,...) { va_list argptr; va_start(argptr, fmt); vfprintf(stderr, fmt, argptr); va_end(argptr); if (subscriber_conn) PQfinish(subscriber_conn); if (provider_conn) PQfinish(provider_conn); if (get_pgpid()) { if (!run_pg_ctl("stop -s")) { fprintf(stderr, _("WARNING: postgres seems to be running, but could not be stopped\n")); } } exit(1); } /* * Print message to stdout and flush */ static void print_msg(VerbosityLevelEnum level, const char *fmt,...) { if (verbosity >= level) { va_list argptr; va_start(argptr, fmt); vfprintf(stdout, fmt, argptr); va_end(argptr); fflush(stdout); } } /* * Start pg_ctl with given argument(s) - used to start/stop postgres * * Returns the exit code reported by pg_ctl. If pg_ctl exits due to a * signal this call will die and not return. */ static int run_pg_ctl(const char *arg) { int ret; PQExpBuffer cmd = createPQExpBuffer(); char *exec_path = find_other_exec_or_die(argv0, "pg_ctl"); appendPQExpBuffer(cmd, "%s %s -D \"%s\"", exec_path, arg, data_dir); /* Run pg_ctl in silent mode unless we run in debug mode. */ if (verbosity < VERBOSITY_DEBUG) appendPQExpBuffer(cmd, " -s"); print_msg(VERBOSITY_DEBUG, _("Running pg_ctl: %s.\n"), cmd->data); ret = system(cmd->data); destroyPQExpBuffer(cmd); if (WIFEXITED(ret)) return WEXITSTATUS(ret); else if (WIFSIGNALED(ret)) die(_("pg_ctl exited with signal %d"), WTERMSIG(ret)); else die(_("pg_ctl exited for an unknown reason (system() returned %d)"), ret); return -1; } /* * Run pg_basebackup to create the copy of the origin node. 
*/ static void run_basebackup(const char *provider_connstr, const char *data_dir, const char *extra_basebackup_args) { int ret; PQExpBuffer cmd = createPQExpBuffer(); char *exec_path = find_other_exec_or_die(argv0, "pg_basebackup"); appendPQExpBuffer(cmd, "%s -D \"%s\" -d \"%s\" -X s -P", exec_path, data_dir, provider_connstr); /* Run pg_basebackup in verbose mode if we are running in verbose mode. */ if (verbosity >= VERBOSITY_VERBOSE) appendPQExpBuffer(cmd, " -v"); if (extra_basebackup_args != NULL) appendPQExpBuffer(cmd, "%s", extra_basebackup_args); print_msg(VERBOSITY_DEBUG, _("Running pg_basebackup: %s.\n"), cmd->data); ret = system(cmd->data); destroyPQExpBuffer(cmd); if (WIFEXITED(ret) && WEXITSTATUS(ret) == 0) return; if (WIFEXITED(ret)) die(_("pg_basebackup failed with exit status %d, cannot continue.\n"), WEXITSTATUS(ret)); else if (WIFSIGNALED(ret)) die(_("pg_basebackup exited with signal %d, cannot continue"), WTERMSIG(ret)); else die(_("pg_basebackup exited for an unknown reason (system() returned %d)"), ret); } /* * Init the datadir * * This function can either ensure provided datadir is a postgres datadir, * or create it using pg_basebackup. * * In any case, new postresql.conf and pg_hba.conf will be copied to the * datadir if they are provided. */ static void initialize_data_dir(char *data_dir, char *connstr, char *postgresql_conf, char *pg_hba_conf, char *extra_basebackup_args) { if (connstr) { print_msg(VERBOSITY_NORMAL, _("Creating base backup of the remote node...\n")); run_basebackup(connstr, data_dir, extra_basebackup_args); } if (postgresql_conf) CopyConfFile(postgresql_conf, "postgresql.conf", false); if (pg_hba_conf) CopyConfFile(pg_hba_conf, "pg_hba.conf", false); } /* * This function checks if provided datadir is clone of the remote node * described by the remote info, or if it's emtpy directory that can be used * as new datadir. */ static bool check_data_dir(char *data_dir, RemoteInfo *remoteinfo) { /* Run basebackup as needed. 
*/ switch (pg_check_dir(data_dir)) { case 0: /* Does not exist */ case 1: /* Exists, empty */ return false; case 2: case 3: /* Exists, not empty */ case 4: { if (!is_pg_dir(data_dir)) die(_("Directory \"%s\" exists but is not valid postgres data directory.\n"), data_dir); return true; } case -1: /* Access problem */ die(_("Could not access directory \"%s\": %s.\n"), data_dir, strerror(errno)); } /* Unreachable */ die(_("Unexpected result from pg_check_dir() call")); return false; } /* * Initialize replication slots */ static char * initialize_replication_slot(PGconn *conn, char *dbname, char *provider_node_name, char *subscription_name, bool drop_slot_if_exists) { PQExpBufferData query; char *slot_name; PGresult *res; /* Generate the slot name. */ initPQExpBuffer(&query); printfPQExpBuffer(&query, "SELECT pglogical.pglogical_gen_slot_name(%s, %s, %s)", PQescapeLiteral(conn, dbname, strlen(dbname)), PQescapeLiteral(conn, provider_node_name, strlen(provider_node_name)), PQescapeLiteral(conn, subscription_name, strlen(subscription_name))); res = PQexec(conn, query.data); if (PQresultStatus(res) != PGRES_TUPLES_OK) die(_("Could generate slot name: %s"), PQerrorMessage(conn)); slot_name = pstrdup(PQgetvalue(res, 0, 0)); PQclear(res); resetPQExpBuffer(&query); /* Check if the current slot exists. */ printfPQExpBuffer(&query, "SELECT 1 FROM pg_catalog.pg_replication_slots WHERE slot_name = %s", PQescapeLiteral(conn, slot_name, strlen(slot_name))); res = PQexec(conn, query.data); if (PQresultStatus(res) != PGRES_TUPLES_OK) die(_("Could not fetch existing slot information: %s"), PQerrorMessage(conn)); /* Drop the existing slot when asked for it or error if it already exists. 
*/ if (PQntuples(res) > 0) { PQclear(res); resetPQExpBuffer(&query); if (!drop_slot_if_exists) die(_("Slot %s already exists, drop it or use --drop-slot-if-exists to drop it automatically.\n"), slot_name); print_msg(VERBOSITY_VERBOSE, _("Droping existing slot %s ...\n"), slot_name); printfPQExpBuffer(&query, "SELECT pg_catalog.pg_drop_replication_slot(%s)", PQescapeLiteral(conn, slot_name, strlen(slot_name))); res = PQexec(conn, query.data); if (PQresultStatus(res) != PGRES_TUPLES_OK) die(_("Could not drop existing slot %s: %s"), slot_name, PQerrorMessage(conn)); } PQclear(res); resetPQExpBuffer(&query); /* And finally, create the slot. */ appendPQExpBuffer(&query, "SELECT pg_create_logical_replication_slot(%s, '%s');", PQescapeLiteral(conn, slot_name, strlen(slot_name)), "pglogical_output"); res = PQexec(conn, query.data); if (PQresultStatus(res) != PGRES_TUPLES_OK) { die(_("Could not create replication slot, status %s: %s\n"), PQresStatus(PQresultStatus(res)), PQresultErrorMessage(res)); } PQclear(res); termPQExpBuffer(&query); return slot_name; } /* * Read replication info about remote connection * * TODO: unify with pglogical_remote_node_info in pglogical_rpc */ static RemoteInfo * get_remote_info(PGconn* conn) { RemoteInfo *ri = (RemoteInfo *)pg_malloc0(sizeof(RemoteInfo)); PGresult *res; if (!extension_exists(conn, "pglogical")) die(_("The remote node is not configured as a pglogical provider.\n")); res = PQexec(conn, "SELECT node_id, node_name, sysid, dbname, replication_sets FROM pglogical.pglogical_node_info()"); if (PQresultStatus(res) != PGRES_TUPLES_OK) die(_("could not fetch remote node info: %s\n"), PQerrorMessage(conn)); /* No nodes found? */ if (PQntuples(res) == 0) die(_("The remote database is not configured as a pglogical node.\n")); if (PQntuples(res) > 1) die(_("The remote database has multiple nodes configured. 
That is not supported with current version of pglogical.\n")); #define atooid(x) ((Oid) strtoul((x), NULL, 10)) ri->nodeid = atooid(PQgetvalue(res, 0, 0)); ri->node_name = pstrdup(PQgetvalue(res, 0, 1)); ri->sysid = pstrdup(PQgetvalue(res, 0, 2)); ri->dbname = pstrdup(PQgetvalue(res, 0, 3)); ri->replication_sets = pstrdup(PQgetvalue(res, 0, 4)); PQclear(res); return ri; } /* * Check if extension exists. */ static bool extension_exists(PGconn *conn, const char *extname) { PQExpBuffer query = createPQExpBuffer(); PGresult *res; bool ret; printfPQExpBuffer(query, "SELECT 1 FROM pg_catalog.pg_extension WHERE extname = %s;", PQescapeLiteral(conn, extname, strlen(extname))); res = PQexec(conn, query->data); if (PQresultStatus(res) != PGRES_TUPLES_OK) { PQclear(res); die(_("Could not read extension info: %s\n"), PQerrorMessage(conn)); } ret = PQntuples(res) == 1; PQclear(res); destroyPQExpBuffer(query); return ret; } /* * Create extension. */ static void install_extension(PGconn *conn, const char *extname) { PQExpBuffer query = createPQExpBuffer(); PGresult *res; printfPQExpBuffer(query, "CREATE EXTENSION IF NOT EXISTS %s;", PQescapeIdentifier(conn, extname, strlen(extname))); res = PQexec(conn, query->data); if (PQresultStatus(res) != PGRES_COMMAND_OK) { PQclear(res); die(_("Could not install %s extension: %s\n"), extname, PQerrorMessage(conn)); } PQclear(res); destroyPQExpBuffer(query); } /* * Clean all the data that was copied from remote node but we don't * want it here (currently shared security labels and replication identifiers). */ static void remove_unwanted_data(PGconn *conn) { PGresult *res; /* * Remove replication identifiers (9.4 will get them removed by dropping * the extension later as we emulate them there). 
*/ if (PQserverVersion(conn) >= 90500) { res = PQexec(conn, "SELECT pg_replication_origin_drop(external_id) FROM pg_replication_origin_status;"); if (PQresultStatus(res) != PGRES_TUPLES_OK) { PQclear(res); die(_("Could not remove existing replication origins: %s\n"), PQerrorMessage(conn)); } PQclear(res); } res = PQexec(conn, "DROP EXTENSION pglogical CASCADE;"); if (PQresultStatus(res) != PGRES_COMMAND_OK) { die(_("Could not clean the pglogical extension, status %s: %s\n"), PQresStatus(PQresultStatus(res)), PQresultErrorMessage(res)); } PQclear(res); } /* * Initialize new remote identifier to specific position. */ static void initialize_replication_origin(PGconn *conn, char *origin_name, char *remote_lsn) { PGresult *res; PQExpBuffer query = createPQExpBuffer(); if (PQserverVersion(conn) >= 90500) { printfPQExpBuffer(query, "SELECT pg_replication_origin_create(%s)", PQescapeLiteral(conn, origin_name, strlen(origin_name))); res = PQexec(conn, query->data); if (PQresultStatus(res) != PGRES_TUPLES_OK) { die(_("Could not create replication origin \"%s\": status %s: %s\n"), query->data, PQresStatus(PQresultStatus(res)), PQresultErrorMessage(res)); } PQclear(res); if (remote_lsn) { printfPQExpBuffer(query, "SELECT pg_replication_origin_advance(%s, '%s')", PQescapeLiteral(conn, origin_name, strlen(origin_name)), remote_lsn); res = PQexec(conn, query->data); if (PQresultStatus(res) != PGRES_TUPLES_OK) { die(_("Could not advance replication origin \"%s\": status %s: %s\n"), query->data, PQresStatus(PQresultStatus(res)), PQresultErrorMessage(res)); } PQclear(res); } } else { printfPQExpBuffer(query, "INSERT INTO pglogical_origin.replication_origin (roident, roname, roremote_lsn) SELECT COALESCE(MAX(roident::int), 0) + 1, %s, %s FROM pglogical_origin.replication_origin", PQescapeLiteral(conn, origin_name, strlen(origin_name)), remote_lsn ? 
PQescapeLiteral(conn, remote_lsn, strlen(remote_lsn)) : "0"); res = PQexec(conn, query->data); if (PQresultStatus(res) != PGRES_COMMAND_OK) { die(_("Could not create replication origin \"%s\": status %s: %s\n"), query->data, PQresStatus(PQresultStatus(res)), PQresultErrorMessage(res)); } PQclear(res); } destroyPQExpBuffer(query); } /* * Create remote restore point which will be used to get into synchronized * state through physical replay. */ static char * create_restore_point(PGconn *conn, char *restore_point_name) { PQExpBuffer query = createPQExpBuffer(); PGresult *res; char *remote_lsn = NULL; printfPQExpBuffer(query, "SELECT pg_create_restore_point('%s')", restore_point_name); res = PQexec(conn, query->data); if (PQresultStatus(res) != PGRES_TUPLES_OK) { die(_("Could not create restore point, status %s: %s\n"), PQresStatus(PQresultStatus(res)), PQresultErrorMessage(res)); } remote_lsn = pstrdup(PQgetvalue(res, 0, 0)); PQclear(res); destroyPQExpBuffer(query); return remote_lsn; } static void pglogical_subscribe(PGconn *conn, char *subscriber_name, char *subscriber_dsn, char *provider_dsn, char *replication_sets, int apply_delay, bool force_text_transfer) { PQExpBufferData query; PQExpBufferData repsets; PGresult *res; initPQExpBuffer(&query); printfPQExpBuffer(&query, "SELECT pglogical.create_node(node_name := %s, dsn := %s);", PQescapeLiteral(conn, subscriber_name, strlen(subscriber_name)), PQescapeLiteral(conn, subscriber_dsn, strlen(subscriber_dsn))); res = PQexec(conn, query.data); if (PQresultStatus(res) != PGRES_TUPLES_OK) { die(_("Could not create local node, status %s: %s\n"), PQresStatus(PQresultStatus(res)), PQresultErrorMessage(res)); } PQclear(res); resetPQExpBuffer(&query); initPQExpBuffer(&repsets); printfPQExpBuffer(&repsets, "{%s}", replication_sets); printfPQExpBuffer(&query, "SELECT pglogical.create_subscription(" "subscription_name := %s, provider_dsn := %s, " "replication_sets := %s, " "apply_delay := '%d seconds'::interval, " 
"synchronize_structure := false, " "synchronize_data := false, " "force_text_transfer := '%s');", PQescapeLiteral(conn, subscriber_name, strlen(subscriber_name)), PQescapeLiteral(conn, provider_dsn, strlen(provider_dsn)), PQescapeLiteral(conn, repsets.data, repsets.len), apply_delay, (force_text_transfer ? "t" : "f")); res = PQexec(conn, query.data); if (PQresultStatus(res) != PGRES_TUPLES_OK) { die(_("Could not create subscription, status %s: %s\n"), PQresStatus(PQresultStatus(res)), PQresultErrorMessage(res)); } PQclear(res); /* TODO */ res = PQexec(conn, "UPDATE pglogical.local_sync_status SET sync_status = 'r'"); if (PQresultStatus(res) != PGRES_COMMAND_OK) { die(_("Could not update subscription, status %s: %s\n"), PQresStatus(PQresultStatus(res)), PQresultErrorMessage(res)); } PQclear(res); termPQExpBuffer(&repsets); termPQExpBuffer(&query); } /* * Validates input of the replication sets and returns normalized data. */ static char * validate_replication_set_input(char *replication_sets) { char *name; PQExpBuffer retbuf = createPQExpBuffer(); char *ret; bool first = true; if (!replication_sets) return NULL; name = strtok(replication_sets, " ,"); while (name != NULL) { const char *cp; if (strlen(name) == 0) die(_("Replication set name \"%s\" is too short\n"), name); if (strlen(name) > NAMEDATALEN) die(_("Replication set name \"%s\" is too long\n"), name); for (cp = name; *cp; cp++) { if (!((*cp >= 'a' && *cp <= 'z') || (*cp >= '0' && *cp <= '9') || (*cp == '_') || (*cp == '-'))) { die(_("Replication set name \"%s\" contains invalid character\n"), name); } } if (first) first = false; else appendPQExpBufferStr(retbuf, ", "); appendPQExpBufferStr(retbuf, name); name = strtok(NULL, " ,"); } ret = pg_strdup(retbuf->data); destroyPQExpBuffer(retbuf); return ret; } static char * get_connstr_dbname(char *connstr) { PQconninfoOption *conn_opts = NULL; PQconninfoOption *conn_opt; char *err_msg = NULL; char *ret = NULL; conn_opts = PQconninfoParse(connstr, &err_msg); if 
(conn_opts == NULL) { die(_("Invalid connection string: %s\n"), err_msg); } for (conn_opt = conn_opts; conn_opt->keyword != NULL; conn_opt++) { if (strcmp(conn_opt->keyword, "dbname") == 0) { ret = pstrdup(conn_opt->val); break; } } PQconninfoFree(conn_opts); return ret; } /* * Build connection string from individual parameter. * * dbname can be specified in connstr parameter */ static char * get_connstr(char *connstr, char *dbname) { char *ret; int argcount = 4; /* dbname, host, user, port */ int i; const char **keywords; const char **values; PQconninfoOption *conn_opts = NULL; PQconninfoOption *conn_opt; char *err_msg = NULL; /* * Merge the connection info inputs given in form of connection string * and options */ i = 0; if (connstr && (strncmp(connstr, "postgresql://", 13) == 0 || strncmp(connstr, "postgres://", 11) == 0 || strchr(connstr, '=') != NULL)) { conn_opts = PQconninfoParse(connstr, &err_msg); if (conn_opts == NULL) { die(_("Invalid connection string: %s\n"), err_msg); } for (conn_opt = conn_opts; conn_opt->keyword != NULL; conn_opt++) { if (conn_opt->val != NULL && conn_opt->val[0] != '\0') argcount++; } keywords = pg_malloc0((argcount + 1) * sizeof(*keywords)); values = pg_malloc0((argcount + 1) * sizeof(*values)); for (conn_opt = conn_opts; conn_opt->keyword != NULL; conn_opt++) { /* If db* parameters were provided, we'll fill them later. */ if (dbname && strcmp(conn_opt->keyword, "dbname") == 0) continue; if (conn_opt->val != NULL && conn_opt->val[0] != '\0') { keywords[i] = conn_opt->keyword; values[i] = conn_opt->val; i++; } } } else { keywords = pg_malloc0((argcount + 1) * sizeof(*keywords)); values = pg_malloc0((argcount + 1) * sizeof(*values)); /* * If connstr was provided but it's not in connection string format and * the dbname wasn't provided then connstr is actually dbname. 
*/ if (connstr && !dbname) dbname = connstr; } if (dbname) { keywords[i] = "dbname"; values[i] = dbname; i++; } ret = PQconninfoParamsToConnstr(keywords, values); /* Connection ok! */ pg_free(values); pg_free(keywords); if (conn_opts) PQconninfoFree(conn_opts); return ret; } /* * Reads the pg_control file of the existing data dir. */ static char * read_sysid(const char *data_dir) { ControlFileData ControlFile; int fd; char ControlFilePath[MAXPGPATH]; char *res = (char *) pg_malloc0(33); snprintf(ControlFilePath, MAXPGPATH, "%s/global/pg_control", data_dir); if ((fd = open(ControlFilePath, O_RDONLY | PG_BINARY, 0)) == -1) die(_("%s: could not open file \"%s\" for reading: %s\n"), progname, ControlFilePath, strerror(errno)); if (read(fd, &ControlFile, sizeof(ControlFileData)) != sizeof(ControlFileData)) die(_("%s: could not read file \"%s\": %s\n"), progname, ControlFilePath, strerror(errno)); close(fd); snprintf(res, 33, UINT64_FORMAT, ControlFile.system_identifier); return res; } /* * Write contents of recovery.conf or postgresql.auto.conf */ static void WriteRecoveryConf(PQExpBuffer contents) { char filename[MAXPGPATH]; FILE *cf; #if PG_VERSION_NUM >= 120000 sprintf(filename, "%s/postgresql.auto.conf", data_dir); cf = fopen(filename, "a"); #else sprintf(filename, "%s/recovery.conf", data_dir); cf = fopen(filename, "w"); #endif if (cf == NULL) { die(_("%s: could not create file \"%s\": %s\n"), progname, filename, strerror(errno)); } if (fwrite(contents->data, contents->len, 1, cf) != 1) { die(_("%s: could not write to file \"%s\": %s\n"), progname, filename, strerror(errno)); } fclose(cf); #if PG_VERSION_NUM >= 120000 { sprintf(filename, "%s/standby.signal", data_dir); cf = fopen(filename, "w"); if (cf == NULL) { die(_("%s: could not create file \"%s\": %s\n"), progname, filename, strerror(errno)); } fclose(cf); } #endif } /* * Copy file to data */ static void CopyConfFile(char *fromfile, char *tofile, bool append) { char filename[MAXPGPATH]; sprintf(filename, 
"%s/%s", data_dir, tofile); print_msg(VERBOSITY_DEBUG, _("Copying \"%s\" to \"%s\".\n"), fromfile, filename); copy_file(fromfile, filename, append); } /* * Convert PQconninfoOption array into conninfo string */ static char * PQconninfoParamsToConnstr(const char *const * keywords, const char *const * values) { PQExpBuffer retbuf = createPQExpBuffer(); char *ret; int i = 0; for (i = 0; keywords[i] != NULL; i++) { if (i > 0) appendPQExpBufferChar(retbuf, ' '); appendPQExpBuffer(retbuf, "%s=", keywords[i]); appendPQExpBufferConnstrValue(retbuf, values[i]); } ret = pg_strdup(retbuf->data); destroyPQExpBuffer(retbuf); return ret; } /* * Escape connection info value */ static void appendPQExpBufferConnstrValue(PQExpBuffer buf, const char *str) { const char *s; bool needquotes; /* * If the string consists entirely of plain ASCII characters, no need to * quote it. This is quite conservative, but better safe than sorry. */ needquotes = false; for (s = str; *s; s++) { if (!((*s >= 'a' && *s <= 'z') || (*s >= 'A' && *s <= 'Z') || (*s >= '0' && *s <= '9') || *s == '_' || *s == '.')) { needquotes = true; break; } } if (needquotes) { appendPQExpBufferChar(buf, '\''); while (*str) { /* ' and \ must be escaped by to \' and \\ */ if (*str == '\'' || *str == '\\') appendPQExpBufferChar(buf, '\\'); appendPQExpBufferChar(buf, *str); str++; } appendPQExpBufferChar(buf, '\''); } else appendPQExpBufferStr(buf, str); } /* * Find the pgport and try a connection */ static void wait_postmaster_connection(const char *connstr) { PGPing res; long pmpid = 0; print_msg(VERBOSITY_VERBOSE, "Waiting for PostgreSQL to accept connections ..."); /* First wait for Postmaster to come up. */ for (;;) { if ((pmpid = get_pgpid()) != 0 && postmaster_is_alive((pid_t) pmpid)) break; pg_usleep(1000000); /* 1 sec */ print_msg(VERBOSITY_VERBOSE, "."); } /* Now wait for Postmaster to either accept connections or die. 
*/ for (;;) { res = PQping(connstr); if (res == PQPING_OK) break; else if (res == PQPING_NO_ATTEMPT) break; /* * Check if the process is still alive. This covers cases where the * postmaster successfully created the pidfile but then crashed without * removing it. */ if (!postmaster_is_alive((pid_t) pmpid)) break; /* No response; wait */ pg_usleep(1000000); /* 1 sec */ print_msg(VERBOSITY_VERBOSE, "."); } print_msg(VERBOSITY_VERBOSE, "\n"); } /* * Wait for PostgreSQL to leave recovery/standby mode */ static void wait_primary_connection(const char *connstr) { bool ispri = false; PGconn *conn = NULL; PGresult *res; wait_postmaster_connection(connstr); print_msg(VERBOSITY_VERBOSE, "Waiting for PostgreSQL to become primary..."); while (!ispri) { if (!conn || PQstatus(conn) != CONNECTION_OK) { if (conn) PQfinish(conn); wait_postmaster_connection(connstr); conn = connectdb(connstr); } res = PQexec(conn, "SELECT pg_is_in_recovery()"); if (PQresultStatus(res) == PGRES_TUPLES_OK && PQntuples(res) == 1 && *PQgetvalue(res, 0, 0) == 'f') ispri = true; else { pg_usleep(1000000); /* 1 sec */ print_msg(VERBOSITY_VERBOSE, "."); } PQclear(res); } PQfinish(conn); print_msg(VERBOSITY_VERBOSE, "\n"); } /* * Wait for postmaster to die */ static void wait_postmaster_shutdown(void) { long pid; print_msg(VERBOSITY_VERBOSE, "Waiting for PostgreSQL to shutdown ..."); for (;;) { if ((pid = get_pgpid()) != 0) { pg_usleep(1000000); /* 1 sec */ print_msg(VERBOSITY_NORMAL, "."); } else break; } print_msg(VERBOSITY_VERBOSE, "\n"); } static bool file_exists(const char *path) { struct stat statbuf; if (stat(path, &statbuf) != 0) return false; return true; } static bool is_pg_dir(const char *path) { struct stat statbuf; char version_file[MAXPGPATH]; if (stat(path, &statbuf) != 0) return false; snprintf(version_file, MAXPGPATH, "%s/PG_VERSION", data_dir); if (stat(version_file, &statbuf) != 0 && errno == ENOENT) { return false; } return true; } /* * copy one file */ static void copy_file(char 
*fromfile, char *tofile, bool append) { char *buffer; int srcfd; int dstfd; int nbytes; off_t offset; #define COPY_BUF_SIZE (8 * BLCKSZ) buffer = malloc(COPY_BUF_SIZE); /* * Open the files */ srcfd = open(fromfile, O_RDONLY | PG_BINARY, 0); if (srcfd < 0) die(_("could not open file \"%s\""), fromfile); dstfd = open(tofile, O_RDWR | O_CREAT | (append ? O_APPEND : O_TRUNC) | PG_BINARY, S_IRUSR | S_IWUSR); if (dstfd < 0) die(_("could not create file \"%s\""), tofile); /* * Do the data copying. */ for (offset = 0;; offset += nbytes) { nbytes = read(srcfd, buffer, COPY_BUF_SIZE); if (nbytes < 0) die(_("could not read file \"%s\""), fromfile); if (nbytes == 0) break; errno = 0; if ((int) write(dstfd, buffer, nbytes) != nbytes) { /* if write didn't set errno, assume problem is no disk space */ if (errno == 0) errno = ENOSPC; die(_("could not write to file \"%s\""), tofile); } } if (close(dstfd)) die(_("could not close file \"%s\""), tofile); /* we don't care about errors here */ close(srcfd); free(buffer); } static char * find_other_exec_or_die(const char *argv0, const char *target) { int ret; char *found_path; uint32 bin_version; found_path = pg_malloc(MAXPGPATH); ret = find_other_exec_version(argv0, target, &bin_version, found_path); if (ret < 0) { char full_path[MAXPGPATH]; if (find_my_exec(argv0, full_path) < 0) strlcpy(full_path, progname, sizeof(full_path)); if (ret == -1) die(_("The program \"%s\" is needed by %s " "but was not found in the\n" "same directory as \"%s\".\n" "Check your installation.\n"), target, progname, full_path); else die(_("The program \"%s\" was found by \"%s\"\n" "but was not the same version as %s.\n" "Check your installation.\n"), target, full_path, progname); } else { char full_path[MAXPGPATH]; if (find_my_exec(argv0, full_path) < 0) strlcpy(full_path, progname, sizeof(full_path)); if (bin_version / 100 != PG_VERSION_NUM / 100) die(_("The program \"%s\" was found by \"%s\"\n" "but was not the same version as %s.\n" "Check your 
installation.\n"), target, full_path, progname); } return found_path; } static bool postmaster_is_alive(pid_t pid) { /* * Test to see if the process is still there. Note that we do not * consider an EPERM failure to mean that the process is still there; * EPERM must mean that the given PID belongs to some other userid, and * considering the permissions on $PGDATA, that means it's not the * postmaster we are after. * * Don't believe that our own PID or parent shell's PID is the postmaster, * either. (Windows hasn't got getppid(), though.) */ if (pid == getpid()) return false; #ifndef WIN32 if (pid == getppid()) return false; #endif if (kill(pid, 0) == 0) return true; return false; } static long get_pgpid(void) { FILE *pidf; long pid; pidf = fopen(pid_file, "r"); if (pidf == NULL) { return 0; } if (fscanf(pidf, "%ld", &pid) != 1) { return 0; } fclose(pidf); return pid; } static char ** get_database_list(char *databases, int *n_databases) { char *c; char **result; int num = 1; for (c = databases; *c; c++ ) if (*c == ',') num++; *n_databases = num; result = palloc(num * sizeof(char *)); num = 0; /* clone the argument so we don't destroy it with strtok*/ databases = pstrdup(databases); c = strtok(databases, ","); while (c != NULL) { result[num] = pstrdup(c); num++; c = strtok(NULL,","); } pfree(databases); return result; } static char * generate_restore_point_name(void) { char *rpn = malloc(NAMEDATALEN); snprintf(rpn, NAMEDATALEN-1, "pglogical_create_subscriber_%lx", random()); return rpn; } pglogical-REL2_4_1/pglogical_dependency.c000066400000000000000000002023241415142317000204700ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_dependency.c * pglogical dependenct handling * * Most of the code here is taken from dependency.c as the dependency * handling in postgres is sadly not extensible. 
* * See * * git diff REL_10_STABLE..REL_11_STABLE -- src/backend/catalog/dependency.c * * for version comparisons, but you can't apply directly as you must * keep old-version compatibility. * * IDENTIFICATION * pglogical_functions.c * *------------------------------------------------------------------------- */ #include "postgres.h" #if PG_VERSION_NUM >= 120000 #include "access/heapam.h" #endif #include "access/htup_details.h" #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/heap.h" #include "catalog/index.h" #include "catalog/objectaccess.h" #include "catalog/pg_am.h" #include "catalog/pg_amop.h" #include "catalog/pg_amproc.h" #include "catalog/pg_attrdef.h" #include "catalog/pg_authid.h" #include "catalog/pg_cast.h" #include "catalog/pg_collation.h" #if PG_VERSION_NUM < 110000 #include "catalog/pg_collation_fn.h" #endif #include "catalog/pg_constraint.h" #include "catalog/pg_conversion.h" #if PG_VERSION_NUM < 110000 #include "catalog/pg_conversion_fn.h" #endif #include "catalog/pg_database.h" #include "catalog/pg_default_acl.h" #include "catalog/pg_event_trigger.h" #include "catalog/pg_extension.h" #include "catalog/pg_foreign_data_wrapper.h" #include "catalog/pg_foreign_server.h" #include "catalog/pg_language.h" #include "catalog/pg_largeobject.h" #include "catalog/pg_namespace.h" #include "catalog/pg_opclass.h" #include "catalog/pg_operator.h" #include "catalog/pg_opfamily.h" #include "catalog/pg_proc.h" #include "catalog/pg_rewrite.h" #include "catalog/pg_tablespace.h" #include "catalog/pg_trigger.h" #include "catalog/pg_ts_config.h" #include "catalog/pg_ts_dict.h" #include "catalog/pg_ts_parser.h" #include "catalog/pg_ts_template.h" #include "catalog/pg_type.h" #include "catalog/pg_user_mapping.h" #include "commands/comment.h" #include "commands/defrem.h" #include "commands/event_trigger.h" #include "commands/extension.h" #include "commands/proclang.h" #include "commands/schemacmds.h" #include "commands/seclabel.h" #include 
"commands/trigger.h" #include "commands/typecmds.h" #include "nodes/nodeFuncs.h" #include "parser/parsetree.h" #include "rewrite/rewriteRemove.h" #include "storage/lmgr.h" #include "utils/fmgroids.h" #include "utils/guc.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/syscache.h" #if PG_VERSION_NUM < 120000 #include "utils/tqual.h" #endif #include "pglogical_dependency.h" #include "pglogical_sync.h" #include "pglogical_repset.h" #include "pglogical.h" #define CATALOG_REPSET_RELATION "depend" typedef struct FormData_pglogical_depend { /* * Identification of the dependent (referencing) object. * * These fields are all zeroes for a DEPENDENCY_PIN entry. */ Oid classid; /* OID of table containing object */ Oid objid; /* OID of object itself */ int32 objsubid; /* column number, or 0 if not used */ /* * Identification of the independent (referenced) object. */ Oid refclassid; /* OID of table containing object */ Oid refobjid; /* OID of object itself */ int32 refobjsubid; /* column number, or 0 if not used */ /* * Precise semantics of the relationship are specified by the deptype * field. See DependencyType in catalog/dependency.h. */ char deptype; /* see codes in dependency.h */ } FormData_pglogical_depend; typedef FormData_pglogical_depend *Form_pglogical_depend; #define Natts_pglogical_depend 7 #define Anum_pglogical_depend_classid 1 #define Anum_pglogical_depend_objid 2 #define Anum_pglogical_depend_objsubid 3 #define Anum_pglogical_depend_refclassid 4 #define Anum_pglogical_depend_refobjid 5 #define Anum_pglogical_depend_refobjsubid 6 #define Anum_pglogical_depend_deptype 7 static Oid get_pglogical_depend_rel_oid(void); /* * Deletion processing requires additional state for each ObjectAddress that * it's planning to delete. For simplicity and code-sharing we make the * ObjectAddresses code support arrays with or without this extra state. 
*/ typedef struct { int flags; /* bitmask, see bit definitions below */ ObjectAddress dependee; /* object whose deletion forced this one */ } ObjectAddressExtra; /* ObjectAddressExtra flag bits */ #define DEPFLAG_ORIGINAL 0x0001 /* an original deletion target */ #define DEPFLAG_NORMAL 0x0002 /* reached via normal dependency */ #define DEPFLAG_AUTO 0x0004 /* reached via auto dependency */ #define DEPFLAG_INTERNAL 0x0008 /* reached via internal dependency */ #define DEPFLAG_EXTENSION 0x0010 /* reached via extension dependency */ #define DEPFLAG_REVERSE 0x0020 /* reverse internal/extension link */ /* expansible list of ObjectAddresses */ struct ObjectAddresses { ObjectAddress *refs; /* => palloc'd array */ ObjectAddressExtra *extras; /* => palloc'd array, or NULL if not used */ int numrefs; /* current number of references */ int maxrefs; /* current size of palloc'd array(s) */ }; /* typedef ObjectAddresses appears in dependency.h */ /* threaded list of ObjectAddresses, for recursion detection */ typedef struct ObjectAddressStack { const ObjectAddress *object; /* object being visited */ int flags; /* its current flag bits */ struct ObjectAddressStack *next; /* next outer stack level */ } ObjectAddressStack; /* for find_expr_references_walker */ typedef struct { ObjectAddresses *addrs; /* addresses being accumulated */ List *rtables; /* list of rangetables to resolve Vars */ } find_expr_references_context; /* * This constant table maps ObjectClasses to the corresponding catalog OIDs. * See also getObjectClass(). 
*/ static const Oid object_classes[] = { RelationRelationId, /* OCLASS_CLASS */ ProcedureRelationId, /* OCLASS_PROC */ TypeRelationId, /* OCLASS_TYPE */ CastRelationId, /* OCLASS_CAST */ CollationRelationId, /* OCLASS_COLLATION */ ConstraintRelationId, /* OCLASS_CONSTRAINT */ ConversionRelationId, /* OCLASS_CONVERSION */ AttrDefaultRelationId, /* OCLASS_DEFAULT */ LanguageRelationId, /* OCLASS_LANGUAGE */ LargeObjectRelationId, /* OCLASS_LARGEOBJECT */ OperatorRelationId, /* OCLASS_OPERATOR */ OperatorClassRelationId, /* OCLASS_OPCLASS */ OperatorFamilyRelationId, /* OCLASS_OPFAMILY */ AccessMethodRelationId, /* OCLASS_AM */ AccessMethodOperatorRelationId, /* OCLASS_AMOP */ AccessMethodProcedureRelationId, /* OCLASS_AMPROC */ RewriteRelationId, /* OCLASS_REWRITE */ TriggerRelationId, /* OCLASS_TRIGGER */ NamespaceRelationId, /* OCLASS_SCHEMA */ TSParserRelationId, /* OCLASS_TSPARSER */ TSDictionaryRelationId, /* OCLASS_TSDICT */ TSTemplateRelationId, /* OCLASS_TSTEMPLATE */ TSConfigRelationId, /* OCLASS_TSCONFIG */ AuthIdRelationId, /* OCLASS_ROLE */ DatabaseRelationId, /* OCLASS_DATABASE */ TableSpaceRelationId, /* OCLASS_TBLSPACE */ ForeignDataWrapperRelationId, /* OCLASS_FDW */ ForeignServerRelationId, /* OCLASS_FOREIGN_SERVER */ UserMappingRelationId, /* OCLASS_USER_MAPPING */ DefaultAclRelationId, /* OCLASS_DEFACL */ ExtensionRelationId, /* OCLASS_EXTENSION */ EventTriggerRelationId, /* OCLASS_EVENT_TRIGGER */ // PolicyRelationId, /* OCLASS_POLICY */ // TransformRelationId /* OCLASS_TRANSFORM */ }; static void findDependentObjects(const ObjectAddress *object, int flags, ObjectAddressStack *stack, ObjectAddresses *targetObjects, const ObjectAddresses *pendingObjects, Relation *depRel); static void reportDependentObjects(const ObjectAddresses *targetObjects, DropBehavior behavior, int msglevel, const ObjectAddress *origObject); static void PGLAcquireDeletionLock(const ObjectAddress *object, int flags); static void PGLReleaseDeletionLock(const ObjectAddress 
*object); static bool find_expr_references_walker(Node *node, find_expr_references_context *context); static void eliminate_duplicate_dependencies(ObjectAddresses *addrs); static int object_address_comparator(const void *a, const void *b); static void add_object_address(ObjectClass oclass, Oid objectId, int32 subId, ObjectAddresses *addrs); static void add_exact_object_address_extra(const ObjectAddress *object, const ObjectAddressExtra *extra, ObjectAddresses *addrs); static bool object_address_present_add_flags(const ObjectAddress *object, int flags, ObjectAddresses *addrs); static bool stack_address_present_add_flags(const ObjectAddress *object, int flags, ObjectAddressStack *stack); static void deleteOneObjectDepencencyRecord(const ObjectAddress *object, Relation *depRel); static void deleteOneObject(const ObjectAddress *object, Relation *depRel); static void doDeletion(const ObjectAddress *object); static char *pglogical_getObjectDescription(const ObjectAddress *object); /* * Go through the objects given running the final actions on them, and execute * the actual deletion. */ static void deleteObjectsInList(ObjectAddresses *targetObjects, Relation *depRel) { int i; /* * Delete all the objects in the proper order. */ for (i = 0; i < targetObjects->numrefs; i++) { ObjectAddress *thisobj = targetObjects->refs + i; deleteOneObject(thisobj, depRel); } } /* * Record a dependency between 2 objects via their respective objectAddress. * The first argument is the dependent object, the second the one it * references. * * This simply creates an entry in pglogical_depend, without any other processing. */ void pglogical_recordDependencyOn(const ObjectAddress *depender, const ObjectAddress *referenced, DependencyType behavior) { pglogical_recordMultipleDependencies(depender, referenced, 1, behavior); } /* * Record multiple dependencies (of the same kind) for a single dependent * object. This has a little less overhead than recording each separately. 
*/ void pglogical_recordMultipleDependencies(const ObjectAddress *depender, const ObjectAddress *referenced, int nreferenced, DependencyType behavior) { Relation dependDesc; HeapTuple tup; int i; bool nulls[Natts_pglogical_depend]; Datum values[Natts_pglogical_depend]; if (nreferenced <= 0) return; /* nothing to do */ dependDesc = table_open(get_pglogical_depend_rel_oid(), RowExclusiveLock); memset(nulls, false, sizeof(nulls)); for (i = 0; i < nreferenced; i++, referenced++) { /* * Record the Dependency. Note we don't bother to check for * duplicate dependencies; there's no harm in them. */ values[Anum_pglogical_depend_classid - 1] = ObjectIdGetDatum(depender->classId); values[Anum_pglogical_depend_objid - 1] = ObjectIdGetDatum(depender->objectId); values[Anum_pglogical_depend_objsubid - 1] = Int32GetDatum(depender->objectSubId); values[Anum_pglogical_depend_refclassid - 1] = ObjectIdGetDatum(referenced->classId); values[Anum_pglogical_depend_refobjid - 1] = ObjectIdGetDatum(referenced->objectId); values[Anum_pglogical_depend_refobjsubid - 1] = Int32GetDatum(referenced->objectSubId); values[Anum_pglogical_depend_deptype - 1] = CharGetDatum((char) behavior); tup = heap_form_tuple(dependDesc->rd_att, values, nulls); CatalogTupleInsert(dependDesc, tup); heap_freetuple(tup); } table_close(dependDesc, RowExclusiveLock); } /* * findDependentObjects - find all objects that depend on 'object' * * For every object that depends on the starting object, acquire a deletion * lock on the object, add it to targetObjects (if not already there), * and recursively find objects that depend on it. An object's dependencies * will be placed into targetObjects before the object itself; this means * that the finished list's order represents a safe deletion order. * * The caller must already have a deletion lock on 'object' itself, * but must not have added it to targetObjects. 
(Note: there are corner * cases where we won't add the object either, and will also release the * caller-taken lock. This is a bit ugly, but the API is set up this way * to allow easy rechecking of an object's liveness after we lock it. See * notes within the function.) * * When dropping a whole object (subId = 0), we find dependencies for * its sub-objects too. * * object: the object to add to targetObjects and find dependencies on * flags: flags to be ORed into the object's targetObjects entry * stack: list of objects being visited in current recursion; topmost item * is the object that we recursed from (NULL for external callers) * targetObjects: list of objects that are scheduled to be deleted * pendingObjects: list of other objects slated for destruction, but * not necessarily in targetObjects yet (can be NULL if none) * *depRel: already opened pglogical_depend relation */ static void findDependentObjects(const ObjectAddress *object, int flags, ObjectAddressStack *stack, ObjectAddresses *targetObjects, const ObjectAddresses *pendingObjects, Relation *depRel) { ScanKeyData key[3]; int nkeys; SysScanDesc scan; HeapTuple tup; ObjectAddress otherObject; ObjectAddressStack mystack; ObjectAddressExtra extra; /* * If the target object is already being visited in an outer recursion * level, just report the current flags back to that level and exit. This * is needed to avoid infinite recursion in the face of circular * dependencies. * * The stack check alone would result in dependency loops being broken at * an arbitrary point, ie, the first member object of the loop to be * visited is the last one to be deleted. This is obviously unworkable. * However, the check for internal dependency below guarantees that we * will not break a loop at an internal dependency: if we enter the loop * at an "owned" object we will switch and start at the "owning" object * instead. We could probably hack something up to avoid breaking at an * auto dependency, too, if we had to. 
However there are no known cases * where that would be necessary. */ if (stack_address_present_add_flags(object, flags, stack)) return; /* * It's also possible that the target object has already been completely * processed and put into targetObjects. If so, again we just add the * specified flags to its entry and return. * * (Note: in these early-exit cases we could release the caller-taken * lock, since the object is presumably now locked multiple times; but it * seems not worth the cycles.) */ if (object_address_present_add_flags(object, flags, targetObjects)) return; /* * The target object might be internally dependent on some other object * (its "owner"), and/or be a member of an extension (also considered its * owner). If so, and if we aren't recursing from the owning object, we * have to transform this deletion request into a deletion request of the * owning object. (We'll eventually recurse back to this object, but the * owning object has to be visited first so it will be deleted after.) The * way to find out about this is to scan the pglogical_depend entries that show * what this object depends on. 
*/ ScanKeyInit(&key[0], Anum_pglogical_depend_classid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(object->classId)); ScanKeyInit(&key[1], Anum_pglogical_depend_objid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(object->objectId)); if (object->objectSubId != 0) { ScanKeyInit(&key[2], Anum_pglogical_depend_objsubid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(object->objectSubId)); nkeys = 3; } else nkeys = 2; scan = systable_beginscan(*depRel, InvalidOid, false, NULL, nkeys, key); while (HeapTupleIsValid(tup = systable_getnext(scan))) { Form_pglogical_depend foundDep = (Form_pglogical_depend) GETSTRUCT(tup); otherObject.classId = foundDep->refclassid; otherObject.objectId = foundDep->refobjid; otherObject.objectSubId = foundDep->refobjsubid; switch (foundDep->deptype) { case DEPENDENCY_NORMAL: case DEPENDENCY_AUTO: #if PG_VERSION_NUM >= 90600 case DEPENDENCY_AUTO_EXTENSION: #endif /* no problem */ break; #if PG_VERSION_NUM >= 110000 && PG_VERSION_NUM < 120000 case DEPENDENCY_INTERNAL_AUTO: #endif case DEPENDENCY_INTERNAL: case DEPENDENCY_EXTENSION: /* * This object is part of the internal implementation of * another object, or is part of the extension that is the * other object. We have three cases: * * 1. At the outermost recursion level, we normally disallow * the DROP. (We just ereport here, rather than proceeding, * since no other dependencies are likely to be interesting.) * However, there are exceptions. */ if (stack == NULL) { char *otherObjDesc; /* * Exception 1a: if the owning object is listed in * pendingObjects, just release the caller's lock and * return. We'll eventually complete the DROP when we * reach that entry in the pending list. 
*/ if (pendingObjects && object_address_present(&otherObject, pendingObjects)) { systable_endscan(scan); /* need to release caller's lock; see notes below */ PGLReleaseDeletionLock(object); return; } /* * Exception 1b: if the owning object is the extension * currently being created/altered, it's okay to continue * with the deletion. This allows dropping of an * extension's objects within the extension's scripts, as * well as corner cases such as dropping a transient * object created within such a script. * * Note that pglogical currently does not care about * extension dependencies and CurrentExtensionObject is * not PGDLLIMPORTed so we relax this and just skip any * extension dependencies. */ if (creating_extension && otherObject.classId == ExtensionRelationId /*&& otherObject.objectId == CurrentExtensionObject*/) break; /* No exception applies, so throw the error */ otherObjDesc = pglogical_getObjectDescription(&otherObject); ereport(ERROR, (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), errmsg("cannot drop %s because %s requires it", pglogical_getObjectDescription(object), otherObjDesc), errhint("You can drop %s instead.", otherObjDesc))); } /* * 2. When recursing from the other end of this dependency, * it's okay to continue with the deletion. This holds when * recursing from a whole object that includes the nominal * other end as a component, too. Since there can be more * than one "owning" object, we have to allow matches that are * more than one level down in the stack. */ if (stack_address_present_add_flags(&otherObject, 0, stack)) break; /* * 3. Not all the owning objects have been visited, so * transform this deletion request into a delete of this * owning object. * * For INTERNAL_AUTO dependencies, we don't enforce this; in * other words, we don't follow the links back to the owning * object. 
 */
#if PG_VERSION_NUM >= 110000 && PG_VERSION_NUM < 120000
				if (foundDep->deptype == DEPENDENCY_INTERNAL_AUTO)
					break;
#endif

				/*
				 * First, release caller's lock on this object and get
				 * deletion lock on the owning object.  (We must release
				 * caller's lock to avoid deadlock against a concurrent
				 * deletion of the owning object.)
				 */
				PGLReleaseDeletionLock(object);
				PGLAcquireDeletionLock(&otherObject, 0);

				/*
				 * The owning object might have been deleted while we waited
				 * to lock it; if so, neither it nor the current object are
				 * interesting anymore.  We test this by checking the
				 * pglogical_depend entry (see notes below).
				 */
				if (!systable_recheck_tuple(scan, tup))
				{
					systable_endscan(scan);
					PGLReleaseDeletionLock(&otherObject);
					return;
				}

				/*
				 * Okay, recurse to the owning object instead of proceeding.
				 *
				 * We do not need to stack the current object; we want the
				 * traversal order to be as if the original reference had
				 * linked to the owning object instead of this one.
				 *
				 * The dependency type is a "reverse" dependency: we need to
				 * delete the owning object if this one is to be deleted, but
				 * this linkage is never a reason for an automatic deletion.
				 */
				findDependentObjects(&otherObject,
									 DEPFLAG_REVERSE,
									 stack,
									 targetObjects,
									 pendingObjects,
									 depRel);
				/* And we're done here. */
				systable_endscan(scan);
				return;

			case DEPENDENCY_PIN:

				/*
				 * Should not happen; PIN dependencies should have zeroes in
				 * the depender fields...
				 */
				elog(ERROR, "incorrect use of PIN dependency with %s",
					 pglogical_getObjectDescription(object));
				break;
			default:
				elog(ERROR, "unrecognized dependency type '%c' for %s",
					 foundDep->deptype, pglogical_getObjectDescription(object));
				break;
		}
	}

	systable_endscan(scan);

	/*
	 * Now recurse to any dependent objects.  We must visit them first since
	 * they have to be deleted before the current object.
	 */
	mystack.object = object;	/* set up a new stack level */
	mystack.flags = flags;
	mystack.next = stack;

	/*
	 * Reverse lookup: scan pglogical_depend for rows whose ref* columns point
	 * at the current object, i.e. objects that depend on it.
	 */
	ScanKeyInit(&key[0],
				Anum_pglogical_depend_refclassid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(object->classId));
	ScanKeyInit(&key[1],
				Anum_pglogical_depend_refobjid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(object->objectId));
	if (object->objectSubId != 0)
	{
		/* sub-object (e.g. a column): also match on the sub-id */
		ScanKeyInit(&key[2],
					Anum_pglogical_depend_refobjsubid,
					BTEqualStrategyNumber, F_INT4EQ,
					Int32GetDatum(object->objectSubId));
		nkeys = 3;
	}
	else
		nkeys = 2;

	scan = systable_beginscan(*depRel, InvalidOid, false,
							  NULL, nkeys, key);

	while (HeapTupleIsValid(tup = systable_getnext(scan)))
	{
		Form_pglogical_depend foundDep = (Form_pglogical_depend) GETSTRUCT(tup);
		int			subflags;

		otherObject.classId = foundDep->classid;
		otherObject.objectId = foundDep->objid;
		otherObject.objectSubId = foundDep->objsubid;

		/*
		 * Must lock the dependent object before recursing to it.
		 */
		PGLAcquireDeletionLock(&otherObject, 0);

		/*
		 * The dependent object might have been deleted while we waited to
		 * lock it; if so, we don't need to do anything more with it. We can
		 * test this cheaply and independently of the object's type by seeing
		 * if the pglogical_depend tuple we are looking at is still live. (If the
		 * object got deleted, the tuple would have been deleted too.)
		 */
		if (!systable_recheck_tuple(scan, tup))
		{
			/* release the now-useless lock */
			PGLReleaseDeletionLock(&otherObject);
			/* and continue scanning for dependencies */
			continue;
		}

		/* Recurse, passing flags indicating the dependency type */
		switch (foundDep->deptype)
		{
			case DEPENDENCY_NORMAL:
				subflags = DEPFLAG_NORMAL;
				break;

			/*
			 * NOTE(review): the "subflags = DEPFLAG_AUTO; break;" below is
			 * inside the PG_VERSION_NUM >= 90600 conditional.  On builds
			 * where PG_VERSION_NUM < 90600, DEPENDENCY_AUTO therefore falls
			 * through to the DEPENDENCY_INTERNAL handling and is tagged
			 * DEPFLAG_INTERNAL instead of DEPFLAG_AUTO -- confirm whether
			 * the #endif is intended to sit after the break.
			 */
			case DEPENDENCY_AUTO:
#if PG_VERSION_NUM >= 90600
			case DEPENDENCY_AUTO_EXTENSION:
				subflags = DEPFLAG_AUTO;
				break;
#endif
			case DEPENDENCY_INTERNAL:
#if PG_VERSION_NUM >= 110000 && PG_VERSION_NUM < 120000
			case DEPENDENCY_INTERNAL_AUTO:
#endif
				subflags = DEPFLAG_INTERNAL;
				break;
			case DEPENDENCY_EXTENSION:
				subflags = DEPFLAG_EXTENSION;
				break;
			case DEPENDENCY_PIN:

				/*
				 * For a PIN dependency we just ereport immediately; there
				 * won't be any others to report.
				 */
				ereport(ERROR,
						(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
						 errmsg("cannot drop %s because it is required by the database system",
								pglogical_getObjectDescription(object))));
				subflags = 0;	/* keep compiler quiet */
				break;
			default:
				elog(ERROR, "unrecognized dependency type '%c' for %s",
					 foundDep->deptype, pglogical_getObjectDescription(object));
				subflags = 0;	/* keep compiler quiet */
				break;
		}

		findDependentObjects(&otherObject,
							 subflags,
							 &mystack,
							 targetObjects,
							 pendingObjects,
							 depRel);
	}

	systable_endscan(scan);

	/*
	 * Finally, we can add the target object to targetObjects.  Be careful to
	 * include any flags that were passed back down to us from inner recursion
	 * levels.
	 */
	extra.flags = mystack.flags;
	if (stack)
		extra.dependee = *stack->object;
	else
		memset(&extra.dependee, 0, sizeof(extra.dependee));
	add_exact_object_address_extra(object, &extra, targetObjects);
}

/*
 * reportDependentObjects - report about dependencies, and fail if RESTRICT
 *
 * Tell the user about dependent objects that we are going to delete
 * (or would need to delete, but are prevented by RESTRICT mode);
 * then error out if there are any and it's not CASCADE mode.
 *
 * targetObjects: list of objects that are scheduled to be deleted
 * behavior: RESTRICT or CASCADE
 * msglevel: elog level for non-error report messages
 * origObject: base object of deletion, or NULL if not available
 *		(the latter case occurs in DROP OWNED)
 */
static void
reportDependentObjects(const ObjectAddresses *targetObjects,
					   DropBehavior behavior,
					   int msglevel,
					   const ObjectAddress *origObject)
{
	bool		ok = true;
	StringInfoData clientdetail;	/* truncated detail shown to the client */
	StringInfoData logdetail;		/* full detail written to the server log */
	int			numReportedClient = 0;
	int			numNotReportedClient = 0;
	int			i;
	int			my_client_min_messages;
	int			my_log_min_messages;

	/*
	 * This is a kludge for Windows (Postgres does not define the GUC
	 * variables as PGDDLIMPORT), so read the current values by name instead
	 * of referencing the global variables directly.
	 */
	my_client_min_messages = atoi(GetConfigOptionByName("client_min_messages",
														NULL, false));
	my_log_min_messages = atoi(GetConfigOptionByName("log_min_messages",
													 NULL, false));

	/*
	 * If no error is to be thrown, and the msglevel is too low to be shown to
	 * either client or server log, there's no need to do any of the work.
	 *
	 * Note: this code doesn't know all there is to be known about elog
	 * levels, but it works for NOTICE and DEBUG2, which are the only values
	 * msglevel can currently have.  We also assume we are running in a normal
	 * operating environment.
	 */
	if (behavior == DROP_CASCADE &&
		msglevel < my_client_min_messages &&
		(msglevel < my_log_min_messages ||
		 my_log_min_messages == LOG))
		return;

	/*
	 * We limit the number of dependencies reported to the client to
	 * MAX_REPORTED_DEPS, since client software may not deal well with
	 * enormous error strings.  The server log always gets a full report.
	 */
#define MAX_REPORTED_DEPS 100

	initStringInfo(&clientdetail);
	initStringInfo(&logdetail);

	/*
	 * We process the list back to front (ie, in dependency order not deletion
	 * order), since this makes for a more understandable display.
	 */
	for (i = targetObjects->numrefs - 1; i >= 0; i--)
	{
		const ObjectAddress *obj = &targetObjects->refs[i];
		const ObjectAddressExtra *extra = &targetObjects->extras[i];
		char	   *objDesc;

		/* Ignore the original deletion target(s) */
		if (extra->flags & DEPFLAG_ORIGINAL)
			continue;

		objDesc = pglogical_getObjectDescription(obj);

		/*
		 * If, at any stage of the recursive search, we reached the object via
		 * an AUTO, INTERNAL, or EXTENSION dependency, then it's okay to
		 * delete it even in RESTRICT mode.
		 */
		if (extra->flags & (DEPFLAG_AUTO | DEPFLAG_INTERNAL | DEPFLAG_EXTENSION))
		{
			/*
			 * auto-cascades are reported at DEBUG2, not msglevel.  We don't
			 * try to combine them with the regular message because the
			 * results are too confusing when client_min_messages and
			 * log_min_messages are different.
			 */
			ereport(DEBUG2,
					(errmsg("drop auto-cascades to %s",
							objDesc)));
		}
		else if (behavior == DROP_RESTRICT)
		{
			/* RESTRICT: record the offending dependency and fail later */
			char	   *otherDesc = pglogical_getObjectDescription(&extra->dependee);

			if (numReportedClient < MAX_REPORTED_DEPS)
			{
				/* separate entries with a newline */
				if (clientdetail.len != 0)
					appendStringInfoChar(&clientdetail, '\n');
				appendStringInfo(&clientdetail, _("%s depends on %s"),
								 objDesc, otherDesc);
				numReportedClient++;
			}
			else
				numNotReportedClient++;
			/* separate entries with a newline */
			if (logdetail.len != 0)
				appendStringInfoChar(&logdetail, '\n');
			appendStringInfo(&logdetail, _("%s depends on %s"),
							 objDesc, otherDesc);
			pfree(otherDesc);
			ok = false;
		}
		else
		{
			/* CASCADE: just report what will be dropped */
			if (numReportedClient < MAX_REPORTED_DEPS)
			{
				/* separate entries with a newline */
				if (clientdetail.len != 0)
					appendStringInfoChar(&clientdetail, '\n');
				appendStringInfo(&clientdetail, _("drop cascades to %s"),
								 objDesc);
				numReportedClient++;
			}
			else
				numNotReportedClient++;
			/* separate entries with a newline */
			if (logdetail.len != 0)
				appendStringInfoChar(&logdetail, '\n');
			appendStringInfo(&logdetail, _("drop cascades to %s"),
							 objDesc);
		}

		pfree(objDesc);
	}

	/* Tell the client how many entries were withheld from its report */
	if (numNotReportedClient > 0)
		appendStringInfo(&clientdetail, ngettext("\nand %d other object "
												 "(see server log for list)",
												 "\nand %d other objects "
												 "(see server log for list)",
												 numNotReportedClient),
						 numNotReportedClient);

	if (!ok)
	{
		if (origObject)
			ereport(ERROR,
					(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
					 errmsg("cannot drop %s because other objects depend on it",
							pglogical_getObjectDescription(origObject)),
					 errdetail("%s", clientdetail.data),
					 errdetail_log("%s", logdetail.data),
					 errhint("Use DROP ... CASCADE to drop the dependent objects too.")));
		else
			ereport(ERROR,
					(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
					 errmsg("cannot drop desired object(s) because other objects depend on them"),
					 errdetail("%s", clientdetail.data),
					 errdetail_log("%s", logdetail.data),
					 errhint("Use DROP ... CASCADE to drop the dependent objects too.")));
	}
	else if (numReportedClient > 1)
	{
		ereport(msglevel,
		/* translator: %d always has a value larger than 1 */
				(errmsg_plural("drop cascades to %d other object",
							   "drop cascades to %d other objects",
							   numReportedClient + numNotReportedClient,
							   numReportedClient + numNotReportedClient),
				 errdetail("%s", clientdetail.data),
				 errdetail_log("%s", logdetail.data)));
	}
	else if (numReportedClient == 1)
	{
		/* we just use the single item as-is */
		ereport(msglevel,
				(errmsg_internal("%s", clientdetail.data)));
	}

	pfree(clientdetail.data);
	pfree(logdetail.data);
}

/*
 * PGLAcquireDeletionLock - acquire a suitable lock for deleting an object
 *
 * We use LockRelation for relations, LockDatabaseObject for everything
 * else.  Note that dependency.c is not concerned with deleting any kind of
 * shared-across-databases object, so we have no need for LockSharedObject.
 */
static void
PGLAcquireDeletionLock(const ObjectAddress *object, int flags)
{
	if (object->classId == RelationRelationId)
	{
		/*
		 * In DROP INDEX CONCURRENTLY, take only ShareUpdateExclusiveLock on
		 * the index for the moment.  index_drop() will promote the lock once
		 * it's safe to do so.  In all other cases we need full exclusive
		 * lock.
		 */
		if (flags & PERFORM_DELETION_CONCURRENTLY)
			LockRelationOid(object->objectId, ShareUpdateExclusiveLock);
		else
			LockRelationOid(object->objectId, AccessExclusiveLock);
	}
	else
	{
		/* assume we should lock the whole object not a sub-object */
		LockDatabaseObject(object->classId, object->objectId, 0,
						   AccessExclusiveLock);
	}
}

/*
 * PGLReleaseDeletionLock - release an object deletion lock
 *
 * Must mirror PGLAcquireDeletionLock; releases the AccessExclusiveLock
 * taken there (the concurrent-index case never reaches here).
 */
static void
PGLReleaseDeletionLock(const ObjectAddress *object)
{
	if (object->classId == RelationRelationId)
		UnlockRelationOid(object->objectId, AccessExclusiveLock);
	else
		/* assume we should lock the whole object not a sub-object */
		UnlockDatabaseObject(object->classId, object->objectId, 0,
							 AccessExclusiveLock);
}

/*
 * recordDependencyOnSingleRelExpr - find expression dependencies
 *
 * As above, but only one relation is expected to be referenced (with
 * varno = 1 and varlevelsup = 0).  Pass the relation OID instead of a
 * range table.  An additional frammish is that dependencies on that
 * relation (or its component columns) will be marked with 'self_behavior',
 * whereas 'behavior' is used for everything else.
 *
 * NOTE: the caller should ensure that a whole-table dependency on the
 * specified relation is created separately, if one is needed.  In particular,
 * a whole-row Var "relation.*" will not cause this routine to emit any
 * dependency item.  This is appropriate behavior for subexpressions of an
 * ordinary query, so other cases need to cope as necessary.
*/ void pglogical_recordDependencyOnSingleRelExpr(const ObjectAddress *depender, Node *expr, Oid relId, DependencyType behavior, DependencyType self_behavior) { find_expr_references_context context; RangeTblEntry rte; context.addrs = new_object_addresses(); /* We gin up a rather bogus rangetable list to handle Vars */ MemSet(&rte, 0, sizeof(rte)); rte.type = T_RangeTblEntry; rte.rtekind = RTE_RELATION; rte.relid = relId; rte.relkind = RELKIND_RELATION; /* no need for exactness here */ context.rtables = list_make1(list_make1(&rte)); /* Scan the expression tree for referenceable objects */ find_expr_references_walker(expr, &context); /* Remove any duplicates */ eliminate_duplicate_dependencies(context.addrs); /* Separate self-dependencies if necessary */ if (behavior != self_behavior && context.addrs->numrefs > 0) { ObjectAddresses *self_addrs; ObjectAddress *outobj; int oldref, outrefs; self_addrs = new_object_addresses(); outobj = context.addrs->refs; outrefs = 0; for (oldref = 0; oldref < context.addrs->numrefs; oldref++) { ObjectAddress *thisobj = context.addrs->refs + oldref; if (thisobj->classId == RelationRelationId && thisobj->objectId == relId) { /* Move this ref into self_addrs */ add_exact_object_address(thisobj, self_addrs); } else { /* Keep it in context.addrs */ *outobj = *thisobj; outobj++; outrefs++; } } context.addrs->numrefs = outrefs; /* Record the self-dependencies */ pglogical_recordMultipleDependencies(depender, self_addrs->refs, self_addrs->numrefs, self_behavior); free_object_addresses(self_addrs); } /* Record the external dependencies */ pglogical_recordMultipleDependencies(depender, context.addrs->refs, context.addrs->numrefs, behavior); free_object_addresses(context.addrs); } /* * Recursively search an expression tree for object references. * * Note: we avoid creating references to columns of tables that participate * in an SQL JOIN construct, but are not actually used anywhere in the query. 
 * To do so, we do not scan the joinaliasvars list of a join RTE while
 * scanning the query rangetable, but instead scan each individual entry
 * of the alias list when we find a reference to it.
 *
 * Note: in many cases we do not need to create dependencies on the datatypes
 * involved in an expression, because we'll have an indirect dependency via
 * some other object.  For instance Var nodes depend on a column which depends
 * on the datatype, and OpExpr nodes depend on the operator which depends on
 * the datatype.  However we do need a type dependency if there is no such
 * indirect dependency, as for example in Const and CoerceToDomain nodes.
 *
 * Similarly, we don't need to create dependencies on collations except where
 * the collation is being freshly introduced to the expression.
 *
 * This is mostly cloned from find_expr_references_walker in Pg's dependency.c
 */
static bool
find_expr_references_walker(Node *node,
							find_expr_references_context *context)
{
	if (node == NULL)
		return false;
	if (IsA(node, Var))
	{
		Var		   *var = (Var *) node;
		List	   *rtable;
		RangeTblEntry *rte;

		/* Find matching rtable entry, or complain if not found */
		if (var->varlevelsup >= list_length(context->rtables))
			elog(ERROR, "invalid varlevelsup %d", var->varlevelsup);
		rtable = (List *) list_nth(context->rtables, var->varlevelsup);
		if (var->varno <= 0 || var->varno > list_length(rtable))
			elog(ERROR, "invalid varno %d", var->varno);
		rte = rt_fetch(var->varno, rtable);

		/*
		 * A whole-row Var references no specific columns, so adds no new
		 * dependency.  (We assume that there is a whole-table dependency
		 * arising from each underlying rangetable entry.  While we could
		 * record such a dependency when finding a whole-row Var that
		 * references a relation directly, it's quite unclear how to extend
		 * that to whole-row Vars for JOINs, so it seems better to leave the
		 * responsibility with the range table.  Note that this poses some
		 * risks for identifying dependencies of stand-alone expressions:
		 * whole-table references may need to be created separately.)
		 */
		if (var->varattno == InvalidAttrNumber)
			return false;
		if (rte->rtekind == RTE_RELATION)
		{
			/* If it's a plain relation, reference this column */
			add_object_address(OCLASS_CLASS, rte->relid, var->varattno,
							   context->addrs);
		}
		else if (rte->rtekind == RTE_JOIN)
		{
			/* Scan join output column to add references to join inputs */
			List	   *save_rtables;

			/* We must make the context appropriate for join's level */
			save_rtables = context->rtables;
			context->rtables = list_copy_tail(context->rtables,
											  var->varlevelsup);
			if (var->varattno <= 0 ||
				var->varattno > list_length(rte->joinaliasvars))
				elog(ERROR, "invalid varattno %d", var->varattno);
			find_expr_references_walker((Node *) list_nth(rte->joinaliasvars,
														  var->varattno - 1),
										context);
			list_free(context->rtables);
			context->rtables = save_rtables;
		}
		/* Vars are fully handled here; no generic recursion needed */
		return false;
	}
	else if (IsA(node, Const))
	{
		Const	   *con = (Const *) node;
		Oid			objoid;

		/* A constant must depend on the constant's datatype */
		add_object_address(OCLASS_TYPE, con->consttype, 0,
						   context->addrs);

		/*
		 * We must also depend on the constant's collation: it could be
		 * different from the datatype's, if a CollateExpr was const-folded to
		 * a simple constant.  However we can save work in the most common
		 * case where the collation is "default", since we know that's pinned.
		 */
		if (OidIsValid(con->constcollid) &&
			con->constcollid != DEFAULT_COLLATION_OID)
			add_object_address(OCLASS_COLLATION, con->constcollid, 0,
							   context->addrs);

		/*
		 * If it's a regclass or similar literal referring to an existing
		 * object, add a reference to that object.  (Currently, only the
		 * regclass and regconfig cases have any likely use, but we may as
		 * well handle all the OID-alias datatypes consistently.)
		 */
		if (!con->constisnull)
		{
			switch (con->consttype)
			{
				case REGPROCOID:
				case REGPROCEDUREOID:
					objoid = DatumGetObjectId(con->constvalue);
					if (SearchSysCacheExists1(PROCOID,
											  ObjectIdGetDatum(objoid)))
						add_object_address(OCLASS_PROC, objoid, 0,
										   context->addrs);
					break;
				case REGOPEROID:
				case REGOPERATOROID:
					objoid = DatumGetObjectId(con->constvalue);
					if (SearchSysCacheExists1(OPEROID,
											  ObjectIdGetDatum(objoid)))
						add_object_address(OCLASS_OPERATOR, objoid, 0,
										   context->addrs);
					break;
				case REGCLASSOID:
					objoid = DatumGetObjectId(con->constvalue);
					if (SearchSysCacheExists1(RELOID,
											  ObjectIdGetDatum(objoid)))
						add_object_address(OCLASS_CLASS, objoid, 0,
										   context->addrs);
					break;
				case REGTYPEOID:
					objoid = DatumGetObjectId(con->constvalue);
					if (SearchSysCacheExists1(TYPEOID,
											  ObjectIdGetDatum(objoid)))
						add_object_address(OCLASS_TYPE, objoid, 0,
										   context->addrs);
					break;
				case REGCONFIGOID:
					objoid = DatumGetObjectId(con->constvalue);
					if (SearchSysCacheExists1(TSCONFIGOID,
											  ObjectIdGetDatum(objoid)))
						add_object_address(OCLASS_TSCONFIG, objoid, 0,
										   context->addrs);
					break;
				case REGDICTIONARYOID:
					objoid = DatumGetObjectId(con->constvalue);
					if (SearchSysCacheExists1(TSDICTOID,
											  ObjectIdGetDatum(objoid)))
						add_object_address(OCLASS_TSDICT, objoid, 0,
										   context->addrs);
					break;
#if PG_VERSION_NUM >= 90500
				case REGNAMESPACEOID:
					objoid = DatumGetObjectId(con->constvalue);
					if (SearchSysCacheExists1(NAMESPACEOID,
											  ObjectIdGetDatum(objoid)))
						add_object_address(OCLASS_SCHEMA, objoid, 0,
										   context->addrs);
					break;

					/*
					 * Dependencies for regrole should be shared among all
					 * databases, so explicitly inhibit to have dependencies.
					 */
				case REGROLEOID:
					ereport(ERROR,
							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
							 errmsg("constant of the type \"regrole\" cannot be used here")));
					break;
#endif
			}
		}
		return false;
	}
	else if (IsA(node, Param))
	{
		Param	   *param = (Param *) node;

		/* A parameter must depend on the parameter's datatype */
		add_object_address(OCLASS_TYPE, param->paramtype, 0,
						   context->addrs);
		/* and its collation, just as for Consts */
		if (OidIsValid(param->paramcollid) &&
			param->paramcollid != DEFAULT_COLLATION_OID)
			add_object_address(OCLASS_COLLATION, param->paramcollid, 0,
							   context->addrs);
	}
	else if (IsA(node, FuncExpr))
	{
		FuncExpr   *funcexpr = (FuncExpr *) node;

		add_object_address(OCLASS_PROC, funcexpr->funcid, 0,
						   context->addrs);
		/* fall through to examine arguments */
	}
	else if (IsA(node, OpExpr))
	{
		OpExpr	   *opexpr = (OpExpr *) node;

		add_object_address(OCLASS_OPERATOR, opexpr->opno, 0,
						   context->addrs);
		/* fall through to examine arguments */
	}
	else if (IsA(node, DistinctExpr))
	{
		DistinctExpr *distinctexpr = (DistinctExpr *) node;

		add_object_address(OCLASS_OPERATOR, distinctexpr->opno, 0,
						   context->addrs);
		/* fall through to examine arguments */
	}
	else if (IsA(node, NullIfExpr))
	{
		NullIfExpr *nullifexpr = (NullIfExpr *) node;

		add_object_address(OCLASS_OPERATOR, nullifexpr->opno, 0,
						   context->addrs);
		/* fall through to examine arguments */
	}
	else if (IsA(node, ScalarArrayOpExpr))
	{
		ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) node;

		add_object_address(OCLASS_OPERATOR, opexpr->opno, 0,
						   context->addrs);
		/* fall through to examine arguments */
	}
	else if (IsA(node, Aggref))
	{
		Aggref	   *aggref = (Aggref *) node;

		add_object_address(OCLASS_PROC, aggref->aggfnoid, 0,
						   context->addrs);
		/* fall through to examine arguments */
	}
	else if (IsA(node, WindowFunc))
	{
		WindowFunc *wfunc = (WindowFunc *) node;

		add_object_address(OCLASS_PROC, wfunc->winfnoid, 0,
						   context->addrs);
		/* fall through to examine arguments */
	}
	else if (IsA(node, SubPlan))
	{
		/* Extra work needed here if we ever need this case */
		elog(ERROR, "already-planned subqueries not supported");
	}
#if PG_VERSION_NUM >= 110000
	else if (IsA(node, FieldSelect))
	{
		FieldSelect *fselect = (FieldSelect *) node;
		Oid			argtype = getBaseType(exprType((Node *) fselect->arg));
		Oid			reltype = get_typ_typrelid(argtype);

		/*
		 * We need a dependency on the specific column named in FieldSelect,
		 * assuming we can identify the pg_class OID for it.  (Probably we
		 * always can at the moment, but in future it might be possible for
		 * argtype to be RECORDOID.)  If we can make a column dependency then
		 * we shouldn't need a dependency on the column's type; but if we
		 * can't, make a dependency on the type, as it might not appear
		 * anywhere else in the expression.
		 */
		if (OidIsValid(reltype))
			add_object_address(OCLASS_CLASS, reltype, fselect->fieldnum,
							   context->addrs);
		else
			add_object_address(OCLASS_TYPE, fselect->resulttype, 0,
							   context->addrs);
		/* the collation might not be referenced anywhere else, either */
		if (OidIsValid(fselect->resultcollid) &&
			fselect->resultcollid != DEFAULT_COLLATION_OID)
			add_object_address(OCLASS_COLLATION, fselect->resultcollid, 0,
							   context->addrs);
	}
	else if (IsA(node, FieldStore))
	{
		FieldStore *fstore = (FieldStore *) node;
		Oid			reltype = get_typ_typrelid(fstore->resulttype);

		/* similar considerations to FieldSelect, but multiple column(s) */
		if (OidIsValid(reltype))
		{
			ListCell   *l;

			foreach(l, fstore->fieldnums)
				add_object_address(OCLASS_CLASS, reltype, lfirst_int(l),
								   context->addrs);
		}
		else
			add_object_address(OCLASS_TYPE, fstore->resulttype, 0,
							   context->addrs);
	}
#endif
	else if (IsA(node, RelabelType))
	{
		RelabelType *relab = (RelabelType *) node;

		/* since there is no function dependency, need to depend on type */
		add_object_address(OCLASS_TYPE, relab->resulttype, 0,
						   context->addrs);
		/* the collation might not be referenced anywhere else, either */
		if (OidIsValid(relab->resultcollid) &&
			relab->resultcollid != DEFAULT_COLLATION_OID)
			add_object_address(OCLASS_COLLATION, relab->resultcollid, 0,
							   context->addrs);
	}
	else if (IsA(node, CoerceViaIO))
	{
		CoerceViaIO *iocoerce = (CoerceViaIO *) node;

		/* since there is no exposed function, need to depend on type */
		add_object_address(OCLASS_TYPE, iocoerce->resulttype, 0,
						   context->addrs);
	}
	else if (IsA(node, ArrayCoerceExpr))
	{
		ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;

#if PG_VERSION_NUM < 110000
		/* See Pg commit c12d570fa14 */
		if (OidIsValid(acoerce->elemfuncid))
			add_object_address(OCLASS_PROC, acoerce->elemfuncid, 0,
							   context->addrs);
#endif
		add_object_address(OCLASS_TYPE, acoerce->resulttype, 0,
						   context->addrs);
#if PG_VERSION_NUM >= 110000
		/*
		 * elemfuncid and coerceexpr are gone, replaced by elemexprstate
		 * as part of arrays-over-domains support; see Pg commit c12d570fa14
		 */
		/* the collation might not be referenced anywhere else, either */
		if (OidIsValid(acoerce->resultcollid) &&
			acoerce->resultcollid != DEFAULT_COLLATION_OID)
			add_object_address(OCLASS_COLLATION, acoerce->resultcollid, 0,
							   context->addrs);
#endif
		/* fall through to examine arguments */
	}
	else if (IsA(node, ConvertRowtypeExpr))
	{
		ConvertRowtypeExpr *cvt = (ConvertRowtypeExpr *) node;

		/* since there is no function dependency, need to depend on type */
		add_object_address(OCLASS_TYPE, cvt->resulttype, 0,
						   context->addrs);
	}
	else if (IsA(node, CollateExpr))
	{
		CollateExpr *coll = (CollateExpr *) node;

		add_object_address(OCLASS_COLLATION, coll->collOid, 0,
						   context->addrs);
	}
	else if (IsA(node, RowExpr))
	{
		RowExpr    *rowexpr = (RowExpr *) node;

		add_object_address(OCLASS_TYPE, rowexpr->row_typeid, 0,
						   context->addrs);
	}
	else if (IsA(node, RowCompareExpr))
	{
		RowCompareExpr *rcexpr = (RowCompareExpr *) node;
		ListCell   *l;

		foreach(l, rcexpr->opnos)
		{
			add_object_address(OCLASS_OPERATOR, lfirst_oid(l), 0,
							   context->addrs);
		}
		foreach(l, rcexpr->opfamilies)
		{
			add_object_address(OCLASS_OPFAMILY, lfirst_oid(l), 0,
							   context->addrs);
		}
		/* fall through to examine arguments */
	}
	else if (IsA(node, CoerceToDomain))
	{
		CoerceToDomain *cd = (CoerceToDomain *) node;

		add_object_address(OCLASS_TYPE, cd->resulttype, 0,
						   context->addrs);
	}
#if PG_VERSION_NUM >= 90500
	else if (IsA(node, OnConflictExpr))
	{
		OnConflictExpr *onconflict = (OnConflictExpr *) node;

		if (OidIsValid(onconflict->constraint))
			add_object_address(OCLASS_CONSTRAINT, onconflict->constraint, 0,
							   context->addrs);
		/* fall through to examine arguments */
	}
#endif
	else if (IsA(node, SortGroupClause))
	{
		SortGroupClause *sgc = (SortGroupClause *) node;

		add_object_address(OCLASS_OPERATOR, sgc->eqop, 0,
						   context->addrs);
		if (OidIsValid(sgc->sortop))
			add_object_address(OCLASS_OPERATOR, sgc->sortop, 0,
							   context->addrs);
		return false;
	}
#if PG_VERSION_NUM >= 110000
	else if (IsA(node, WindowClause))
	{
		WindowClause *wc = (WindowClause *) node;

		if (OidIsValid(wc->startInRangeFunc))
			add_object_address(OCLASS_PROC, wc->startInRangeFunc, 0,
							   context->addrs);
		if (OidIsValid(wc->endInRangeFunc))
			add_object_address(OCLASS_PROC, wc->endInRangeFunc, 0,
							   context->addrs);
		if (OidIsValid(wc->inRangeColl) &&
			wc->inRangeColl != DEFAULT_COLLATION_OID)
			add_object_address(OCLASS_COLLATION, wc->inRangeColl, 0,
							   context->addrs);
		/* fall through to examine substructure */
	}
#endif
	else if (IsA(node, Query))
	{
		/* Recurse into RTE subquery or not-yet-planned sublink subquery */
		Query	   *query = (Query *) node;
		ListCell   *lc;
		bool		result;

		/*
		 * Add whole-relation refs for each plain relation mentioned in the
		 * subquery's rtable.
		 *
		 * Note: query_tree_walker takes care of recursing into RTE_FUNCTION
		 * RTEs, subqueries, etc, so no need to do that here.  But keep it
		 * from looking at join alias lists.
		 *
		 * Note: we don't need to worry about collations mentioned in
		 * RTE_VALUES or RTE_CTE RTEs, because those must just duplicate
		 * collations referenced in other parts of the Query.  We do have to
		 * worry about collations mentioned in RTE_FUNCTION, but we take care
		 * of those when we recurse to the RangeTblFunction node(s).
		 */
		foreach(lc, query->rtable)
		{
			RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);

			switch (rte->rtekind)
			{
				case RTE_RELATION:
					add_object_address(OCLASS_CLASS, rte->relid, 0,
									   context->addrs);
					break;
				default:
					break;
			}
		}

		/*
		 * If the query is an INSERT or UPDATE, we should create a dependency
		 * on each target column, to prevent the specific target column from
		 * being dropped.  Although we will visit the TargetEntry nodes again
		 * during query_tree_walker, we won't have enough context to do this
		 * conveniently, so do it here.
		 */
		if (query->commandType == CMD_INSERT ||
			query->commandType == CMD_UPDATE)
		{
			RangeTblEntry *rte;

			if (query->resultRelation <= 0 ||
				query->resultRelation > list_length(query->rtable))
				elog(ERROR, "invalid resultRelation %d",
					 query->resultRelation);
			rte = rt_fetch(query->resultRelation, query->rtable);
			if (rte->rtekind == RTE_RELATION)
			{
				foreach(lc, query->targetList)
				{
					TargetEntry *tle = (TargetEntry *) lfirst(lc);

					if (tle->resjunk)
						continue;	/* ignore junk tlist items */
					add_object_address(OCLASS_CLASS, rte->relid, tle->resno,
									   context->addrs);
				}
			}
		}

		/*
		 * Add dependencies on constraints listed in query's constraintDeps
		 */
		foreach(lc, query->constraintDeps)
		{
			add_object_address(OCLASS_CONSTRAINT, lfirst_oid(lc), 0,
							   context->addrs);
		}

		/* query_tree_walker ignores ORDER BY etc, but we need those opers */
		find_expr_references_walker((Node *) query->sortClause, context);
		find_expr_references_walker((Node *) query->groupClause, context);
		find_expr_references_walker((Node *) query->windowClause, context);
		find_expr_references_walker((Node *) query->distinctClause, context);

		/* Examine substructure of query */
		context->rtables = lcons(query->rtable, context->rtables);
		result = query_tree_walker(query,
								   find_expr_references_walker,
								   (void *) context,
								   QTW_IGNORE_JOINALIASES);
		/* pop this query level's rtable off the stack again */
		context->rtables = list_delete_first(context->rtables);
		return result;
	}
	else if (IsA(node, SetOperationStmt))
	{
		SetOperationStmt *setop = (SetOperationStmt *) node;

		/* we need to look at the groupClauses for operator references */
		find_expr_references_walker((Node *) setop->groupClauses, context);
		/* fall through to examine child nodes */
	}
	else if (IsA(node, RangeTblFunction))
	{
		RangeTblFunction *rtfunc = (RangeTblFunction *) node;
		ListCell   *ct;

		/*
		 * Add refs for any datatypes and collations used in a column
		 * definition list for a RECORD function.  (For other cases, it should
		 * be enough to depend on the function itself.)
		 */
		foreach(ct, rtfunc->funccoltypes)
		{
			add_object_address(OCLASS_TYPE, lfirst_oid(ct), 0,
							   context->addrs);
		}
		foreach(ct, rtfunc->funccolcollations)
		{
			Oid			collid = lfirst_oid(ct);

			if (OidIsValid(collid) && collid != DEFAULT_COLLATION_OID)
				add_object_address(OCLASS_COLLATION, collid, 0,
								   context->addrs);
		}
	}
#if PG_VERSION_NUM >= 90500
	else if (IsA(node, TableSampleClause))
	{
		TableSampleClause *tsc = (TableSampleClause *) node;

		add_object_address(OCLASS_PROC, tsc->tsmhandler, 0,
						   context->addrs);
		/* fall through to examine arguments */
	}
#endif

	/* generic recursion into the node's children */
	return expression_tree_walker(node, find_expr_references_walker,
								  (void *) context);
}

/*
 * Given an array of dependency references, eliminate any duplicates.
 */
static void
eliminate_duplicate_dependencies(ObjectAddresses *addrs)
{
	ObjectAddress *priorobj;
	int			oldref,
				newrefs;

	/*
	 * We can't sort if the array has "extra" data, because there's no way to
	 * keep it in sync.  Fortunately that combination of features is not
	 * needed.
	 */
	Assert(!addrs->extras);

	if (addrs->numrefs <= 1)
		return;					/* nothing to do */

	/* Sort the refs so that duplicates are adjacent */
	qsort((void *) addrs->refs, addrs->numrefs, sizeof(ObjectAddress),
		  object_address_comparator);

	/* Remove dups: classic in-place compaction over the sorted array */
	priorobj = addrs->refs;
	newrefs = 1;
	for (oldref = 1; oldref < addrs->numrefs; oldref++)
	{
		ObjectAddress *thisobj = addrs->refs + oldref;

		if (priorobj->classId == thisobj->classId &&
			priorobj->objectId == thisobj->objectId)
		{
			if (priorobj->objectSubId == thisobj->objectSubId)
				continue;		/* identical, so drop thisobj */

			/*
			 * If we have a whole-object reference and a reference to a part
			 * of the same object, we don't need the whole-object reference
			 * (for example, we don't need to reference both table foo and
			 * column foo.bar).  The whole-object reference will always appear
			 * first in the sorted list.
			 */
			if (priorobj->objectSubId == 0)
			{
				/* replace whole ref with partial */
				priorobj->objectSubId = thisobj->objectSubId;
				continue;
			}
		}
		/* Not identical, so add thisobj to output set */
		priorobj++;
		*priorobj = *thisobj;
		newrefs++;
	}
	addrs->numrefs = newrefs;
}

/*
 * qsort comparator for ObjectAddress items
 *
 * Orders by classId, then objectId, then objectSubId (unsigned), so that
 * a whole-object entry (subId 0) sorts before its sub-object entries.
 */
static int
object_address_comparator(const void *a, const void *b)
{
	const ObjectAddress *obja = (const ObjectAddress *) a;
	const ObjectAddress *objb = (const ObjectAddress *) b;

	if (obja->classId < objb->classId)
		return -1;
	if (obja->classId > objb->classId)
		return 1;
	if (obja->objectId < objb->objectId)
		return -1;
	if (obja->objectId > objb->objectId)
		return 1;

	/*
	 * We sort the subId as an unsigned int so that 0 will come first.  See
	 * logic in eliminate_duplicate_dependencies.
	 */
	if ((unsigned int) obja->objectSubId < (unsigned int) objb->objectSubId)
		return -1;
	if ((unsigned int) obja->objectSubId > (unsigned int) objb->objectSubId)
		return 1;
	return 0;
}

/*
 * Add an entry to an ObjectAddresses array.
 *
 * It is convenient to specify the class by ObjectClass rather than directly
 * by catalog OID.
*/ static void add_object_address(ObjectClass oclass, Oid objectId, int32 subId, ObjectAddresses *addrs) { ObjectAddress *item; /* enlarge array if needed */ if (addrs->numrefs >= addrs->maxrefs) { addrs->maxrefs *= 2; addrs->refs = (ObjectAddress *) repalloc(addrs->refs, addrs->maxrefs * sizeof(ObjectAddress)); Assert(!addrs->extras); } /* record this item */ item = addrs->refs + addrs->numrefs; item->classId = object_classes[oclass]; item->objectId = objectId; item->objectSubId = subId; addrs->numrefs++; } /* * Add an entry to an ObjectAddresses array. * * As above, but specify entry exactly and provide some "extra" data too. */ static void add_exact_object_address_extra(const ObjectAddress *object, const ObjectAddressExtra *extra, ObjectAddresses *addrs) { ObjectAddress *item; ObjectAddressExtra *itemextra; /* allocate extra space if first time */ if (!addrs->extras) addrs->extras = (ObjectAddressExtra *) palloc(addrs->maxrefs * sizeof(ObjectAddressExtra)); /* enlarge array if needed */ if (addrs->numrefs >= addrs->maxrefs) { addrs->maxrefs *= 2; addrs->refs = (ObjectAddress *) repalloc(addrs->refs, addrs->maxrefs * sizeof(ObjectAddress)); addrs->extras = (ObjectAddressExtra *) repalloc(addrs->extras, addrs->maxrefs * sizeof(ObjectAddressExtra)); } /* record this item */ item = addrs->refs + addrs->numrefs; *item = *object; itemextra = addrs->extras + addrs->numrefs; *itemextra = *extra; addrs->numrefs++; } /* * As above, except that if the object is present then also OR the given * flags into its associated extra data (which must exist). 
*/ static bool object_address_present_add_flags(const ObjectAddress *object, int flags, ObjectAddresses *addrs) { bool result = false; int i; for (i = addrs->numrefs - 1; i >= 0; i--) { ObjectAddress *thisobj = addrs->refs + i; if (object->classId == thisobj->classId && object->objectId == thisobj->objectId) { if (object->objectSubId == thisobj->objectSubId) { ObjectAddressExtra *thisextra = addrs->extras + i; thisextra->flags |= flags; result = true; } else if (thisobj->objectSubId == 0) { /* * We get here if we find a need to delete a column after * having already decided to drop its whole table. Obviously * we no longer need to drop the subobject, so report that we * found the subobject in the array. But don't plaster its * flags on the whole object. */ result = true; } else if (object->objectSubId == 0) { /* * We get here if we find a need to delete a whole table after * having already decided to drop one of its columns. We * can't report that the whole object is in the array, but we * should mark the subobject with the whole object's flags. * * It might seem attractive to physically delete the column's * array entry, or at least mark it as no longer needing * separate deletion. But that could lead to, e.g., dropping * the column's datatype before we drop the table, which does * not seem like a good idea. This is a very rare situation * in practice, so we just take the hit of doing a separate * DROP COLUMN action even though we know we're gonna delete * the table later. * * Because there could be other subobjects of this object in * the array, this case means we always have to loop through * the whole array; we cannot exit early on a match. */ ObjectAddressExtra *thisextra = addrs->extras + i; thisextra->flags |= flags; } } } return result; } /* * Similar to above, except we search an ObjectAddressStack. 
*/ static bool stack_address_present_add_flags(const ObjectAddress *object, int flags, ObjectAddressStack *stack) { bool result = false; ObjectAddressStack *stackptr; for (stackptr = stack; stackptr; stackptr = stackptr->next) { const ObjectAddress *thisobj = stackptr->object; if (object->classId == thisobj->classId && object->objectId == thisobj->objectId) { if (object->objectSubId == thisobj->objectSubId) { stackptr->flags |= flags; result = true; } else if (thisobj->objectSubId == 0) { /* * We're visiting a column with whole table already on stack. * As in object_address_present_add_flags(), we can skip * further processing of the subobject, but we don't want to * propagate flags for the subobject to the whole object. */ result = true; } else if (object->objectSubId == 0) { /* * We're visiting a table with column already on stack. As in * object_address_present_add_flags(), we should propagate * flags for the whole object to each of its subobjects. */ stackptr->flags |= flags; } } } return result; } /* * Drop dependencies if possible, error if not. */ void pglogical_tryDropDependencies(const ObjectAddress *object, DropBehavior behavior) { Relation depRel; ObjectAddresses *targetObjects; /* * We save some cycles by opening pglogical_depend just once and passing the * Relation pointer down to all the recursive deletion steps. */ depRel = table_open(get_pglogical_depend_rel_oid(), RowExclusiveLock); /* * Construct a list of objects to delete (ie, the given object plus * everything directly or indirectly dependent on it). */ targetObjects = new_object_addresses(); findDependentObjects(object, DEPFLAG_ORIGINAL, NULL, /* empty stack */ targetObjects, NULL, /* no pendingObjects */ &depRel); /* * Check if deletion is allowed, and report about cascaded deletes. 
*/ reportDependentObjects(targetObjects, behavior, NOTICE, object); /* * Unlike the builtin dependency tracking, we don't actually drop * the original object here as it has already been dropped by the time * this function has been called so remove it from the array. */ if (targetObjects->numrefs) targetObjects->numrefs--; deleteObjectsInList(targetObjects, &depRel); deleteOneObjectDepencencyRecord(object, &depRel); CommandCounterIncrement(); /* And clean up */ free_object_addresses(targetObjects); table_close(depRel, RowExclusiveLock); } /* * Remove any dependency records for the given object. * * This used to be part of deleteOneObject */ static void deleteOneObjectDepencencyRecord(const ObjectAddress *object, Relation *depRel) { ScanKeyData key[3]; int nkeys; SysScanDesc scan; HeapTuple tup; ScanKeyInit(&key[0], Anum_pglogical_depend_classid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(object->classId)); ScanKeyInit(&key[1], Anum_pglogical_depend_objid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(object->objectId)); if (object->objectSubId != 0) { ScanKeyInit(&key[2], Anum_pglogical_depend_objsubid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(object->objectSubId)); nkeys = 3; } else nkeys = 2; scan = systable_beginscan(*depRel, InvalidOid, false, NULL, nkeys, key); while (HeapTupleIsValid(tup = systable_getnext(scan))) { simple_heap_delete(*depRel, &tup->t_self); } systable_endscan(scan); } /* * deleteOneObject: delete a single object for performDeletion. * * *depRel is the already-open pglogical_depend relation. */ static void deleteOneObject(const ObjectAddress *object, Relation *depRel) { /* * Delete the object itself, in an object-type-dependent way. * * We used to do this after removing the outgoing dependency links, but it * seems just as reasonable to do it beforehand. 
In the concurrent case * we *must* do it in this order, because we can't make any transactional * updates before calling doDeletion() --- they'd get committed right * away, which is not cool if the deletion then fails. */ doDeletion(object); /* * Now remove any pglogical_depend records that link from this object to others. * (Any records linking to this object should be gone already.) * * When dropping a whole object (subId = 0), remove all pglogical_depend records * for its sub-objects too. */ deleteOneObjectDepencencyRecord(object, depRel); /* * CommandCounterIncrement here to ensure that preceding changes are all * visible to the next deletion step. */ CommandCounterIncrement(); /* * And we're done! */ } /* * doDeletion: actually delete a single object * * This is customised in pglogical. */ static void doDeletion(const ObjectAddress *object) { if (object->classId == get_replication_set_rel_oid()) drop_replication_set(object->objectId); else if (object->classId == get_replication_set_table_rel_oid()) replication_set_remove_table(object->objectId, object->objectSubId, true); else if (object->classId == get_replication_set_seq_rel_oid()) replication_set_remove_seq(object->objectId, object->objectSubId, true); else elog(ERROR, "unrecognized pglogical object class: %u", object->classId); } /* * Get (cached) oid of the dependency catalog */ static Oid get_pglogical_depend_rel_oid(void) { static Oid dependrelationoid = InvalidOid; if (dependrelationoid == InvalidOid) dependrelationoid = get_pglogical_table_oid(CATALOG_REPSET_RELATION); return dependrelationoid; } /* * Get the object description, first looking into our object description * cache, our own knowledge of pglogical objects and finally standard postgres * way. 
*/ static char * pglogical_getObjectDescription(const ObjectAddress *object) { StringInfoData objdesc; if (object->classId == get_replication_set_rel_oid()) { PGLogicalRepSet *repset; repset = get_replication_set(object->objectId); initStringInfo(&objdesc); appendStringInfo(&objdesc, "replication set %s", repset->name); return objdesc.data; } else if (object->classId == get_replication_set_table_rel_oid() || object->classId == get_replication_set_seq_rel_oid()) { ObjectAddress tbladdr; PGLogicalRepSet *repset; tbladdr.classId = RelationRelationId; tbladdr.objectId = object->objectSubId; tbladdr.objectSubId = 0; repset = get_replication_set(object->objectId); initStringInfo(&objdesc); appendStringInfo(&objdesc, "%s membership in replication set %s", pglogical_getObjectDescription(&tbladdr), repset->name); return objdesc.data; } return getObjectDescription(object); } void pglogical_checkDependency(const ObjectAddress *object, DropBehavior behavior) { HeapTuple tp; Form_pg_class reltup; if (object->classId != RelationRelationId) return; pglogical_tryDropDependencies(object, behavior); tp = SearchSysCache1(RELOID, ObjectIdGetDatum(object->objectId)); if (!HeapTupleIsValid(tp)) return; reltup = (Form_pg_class) GETSTRUCT(tp); if (reltup->relkind == RELKIND_RELATION) drop_table_sync_status(get_namespace_name(reltup->relnamespace), NameStr(reltup->relname)); ReleaseSysCache(tp); } pglogical-REL2_4_1/pglogical_dependency.h000066400000000000000000000022031415142317000204670ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_dependency.h * Dependency handling * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_dependency.h * *------------------------------------------------------------------------- */ #ifndef PGLOGICAL_DEPENDENCY_H #define PGLOGICAL_DEPENDENCY_H extern void pglogical_recordDependencyOn(const ObjectAddress *depender, const ObjectAddress *referenced, 
DependencyType behavior); extern void pglogical_recordMultipleDependencies(const ObjectAddress *depender, const ObjectAddress *referenced, int nreferenced, DependencyType behavior); extern void pglogical_recordDependencyOnSingleRelExpr(const ObjectAddress *depender, Node *expr, Oid relId, DependencyType behavior, DependencyType self_behavior); extern void pglogical_tryDropDependencies(const ObjectAddress *object, DropBehavior behavior); extern void pglogical_checkDependency(const ObjectAddress *object, DropBehavior behavior); #endif /* PGLOGICAL_DEPENDENCY_H */ pglogical-REL2_4_1/pglogical_dump/000077500000000000000000000000001415142317000171505ustar00rootroot00000000000000pglogical-REL2_4_1/pglogical_executor.c000066400000000000000000000202071415142317000202060ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_executor.c * pglogical executor related functions * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_executor.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "access/hash.h" #include "access/htup_details.h" #include "access/xact.h" #include "access/xlog.h" #include "catalog/dependency.h" #include "catalog/namespace.h" #include "catalog/objectaccess.h" #include "catalog/pg_extension.h" #include "catalog/pg_type.h" #include "commands/extension.h" #include "commands/trigger.h" #include "executor/executor.h" #include "nodes/nodeFuncs.h" #if PG_VERSION_NUM >= 120000 #include "optimizer/optimizer.h" #else #include "optimizer/planner.h" #endif #include "parser/parse_coerce.h" #include "tcop/utility.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/json.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/rel.h" #include "utils/snapmgr.h" #include "pglogical_node.h" #include "pglogical_executor.h" #include "pglogical_repset.h" 
#include "pglogical_queue.h" #include "pglogical_dependency.h" #include "pglogical.h" List *pglogical_truncated_tables = NIL; static DropBehavior pglogical_lastDropBehavior = DROP_RESTRICT; static bool dropping_pglogical_obj = false; static object_access_hook_type next_object_access_hook = NULL; static ProcessUtility_hook_type next_ProcessUtility_hook = NULL; EState * create_estate_for_relation(Relation rel, bool forwrite) { EState *estate; RangeTblEntry *rte; /* Dummy range table entry needed by executor. */ rte = makeNode(RangeTblEntry); rte->rtekind = RTE_RELATION; rte->relid = RelationGetRelid(rel); rte->relkind = rel->rd_rel->relkind; /* Initialize executor state. */ estate = CreateExecutorState(); #if PG_VERSION_NUM >= 120000 ExecInitRangeTable(estate, list_make1(rte)); #elif PG_VERSION_NUM >= 110000 && SECONDQ_VERSION_NUM >= 103 /* 2ndQPostgres 11 r1.3 changes executor API */ estate->es_range_table = alist_add(NULL, rte); #else estate->es_range_table = list_make1(rte); #endif #if PG_VERSION_NUM < 120000 if (rel->trigdesc) estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate); #endif estate->es_output_cid = GetCurrentCommandId(forwrite); return estate; } ExprContext * prepare_per_tuple_econtext(EState *estate, TupleDesc tupdesc) { ExprContext *econtext; MemoryContext oldContext; econtext = GetPerTupleExprContext(estate); oldContext = MemoryContextSwitchTo(estate->es_query_cxt); econtext->ecxt_scantuple = ExecInitExtraTupleSlot(estate); MemoryContextSwitchTo(oldContext); ExecSetSlotDescriptor(econtext->ecxt_scantuple, tupdesc); return econtext; } ExprState * pglogical_prepare_row_filter(Node *row_filter) { ExprState *exprstate; Expr *expr; Oid exprtype; exprtype = exprType(row_filter); expr = (Expr *) coerce_to_target_type(NULL, /* no UNKNOWN params here */ row_filter, exprtype, BOOLOID, -1, COERCION_ASSIGNMENT, COERCE_IMPLICIT_CAST, -1); /* This should never happen but just to be sure. 
*/ if (expr == NULL) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("cannot cast the row_filter to boolean"), errhint("You will need to rewrite the row_filter."))); expr = expression_planner(expr); exprstate = ExecInitExpr(expr, NULL); return exprstate; } static void pglogical_start_truncate(void) { pglogical_truncated_tables = NIL; } static void pglogical_finish_truncate(void) { ListCell *tlc; PGLogicalLocalNode *local_node; /* If this is not pglogical node, don't do anything. */ local_node = get_local_node(false, true); if (!local_node || !list_length(pglogical_truncated_tables)) return; foreach (tlc, pglogical_truncated_tables) { Oid reloid = lfirst_oid(tlc); char *nspname; char *relname; List *repsets; StringInfoData json; /* Format the query. */ nspname = get_namespace_name(get_rel_namespace(reloid)); relname = get_rel_name(reloid); /* It's easier to construct json manually than via Jsonb API... */ initStringInfo(&json); appendStringInfo(&json, "{\"schema_name\": "); escape_json(&json, nspname); appendStringInfo(&json, ",\"table_name\": "); escape_json(&json, relname); appendStringInfo(&json, "}"); repsets = get_table_replication_sets(local_node->node->id, reloid); if (list_length(repsets)) { List *repset_names = NIL; ListCell *rlc; foreach (rlc, repsets) { PGLogicalRepSet *repset = (PGLogicalRepSet *) lfirst(rlc); repset_names = lappend(repset_names, pstrdup(repset->name)); } /* Queue the truncate for replication. 
*/ queue_message(repset_names, GetUserId(), QUEUE_COMMAND_TYPE_TRUNCATE, json.data); } } list_free(pglogical_truncated_tables); pglogical_truncated_tables = NIL; } static void pglogical_ProcessUtility( #if PG_VERSION_NUM >= 100000 PlannedStmt *pstmt, #else Node *pstmt, #endif const char *queryString, #if PG_VERSION_NUM >= 140000 bool readOnlyTree, #endif ProcessUtilityContext context, ParamListInfo params, #if PG_VERSION_NUM >= 100000 QueryEnvironment *queryEnv, #endif DestReceiver *dest, #ifdef XCP bool sentToRemote, #endif QueryCompletion *qc) { #if PG_VERSION_NUM >= 100000 Node *parsetree = pstmt->utilityStmt; #else Node *parsetree = pstmt; #define queryEnv NULL #endif #ifndef XCP #define sentToRemote NULL #endif dropping_pglogical_obj = false; if (nodeTag(parsetree) == T_TruncateStmt) pglogical_start_truncate(); if (nodeTag(parsetree) == T_DropStmt) pglogical_lastDropBehavior = ((DropStmt *)parsetree)->behavior; /* There's no reason we should be in a long lived context here */ Assert(CurrentMemoryContext != TopMemoryContext && CurrentMemoryContext != CacheMemoryContext); if (next_ProcessUtility_hook) PGLnext_ProcessUtility_hook(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc); else PGLstandard_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, sentToRemote, qc); if (nodeTag(parsetree) == T_TruncateStmt) pglogical_finish_truncate(); } /* * Handle object drop. * * Calls to dependency tracking code. */ static void pglogical_object_access(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg) { if (next_object_access_hook) (*next_object_access_hook) (access, classId, objectId, subId, arg); if (access == OAT_DROP) { ObjectAccessDrop *drop_arg = (ObjectAccessDrop *) arg; ObjectAddress object; DropBehavior behavior; /* No need to check for internal deletions. */ if ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL) != 0) return; /* Dropping pglogical itself? 
*/ if (classId == ExtensionRelationId && objectId == get_extension_oid(EXTENSION_NAME, true) && objectId != InvalidOid /* Should not happen but check anyway */) dropping_pglogical_obj = true; /* Dropping relation within pglogical? */ if (classId == RelationRelationId) { Oid relnspoid; Oid pglnspoid; pglnspoid = get_namespace_oid(EXTENSION_NAME, true); relnspoid = get_rel_namespace(objectId); if (pglnspoid == relnspoid) dropping_pglogical_obj = true; } /* * Don't do extra dependency checks for internal objects, those * should be handled by Postgres. */ if (dropping_pglogical_obj) return; /* No local node? */ if (!get_local_node(false, true)) return; ObjectAddressSubSet(object, classId, objectId, subId); if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA) behavior = DROP_CASCADE; else behavior = pglogical_lastDropBehavior; pglogical_checkDependency(&object, behavior); } } void pglogical_executor_init(void) { next_ProcessUtility_hook = ProcessUtility_hook; ProcessUtility_hook = pglogical_ProcessUtility; /* Object access hook */ next_object_access_hook = object_access_hook; object_access_hook = pglogical_object_access; } pglogical-REL2_4_1/pglogical_executor.h000066400000000000000000000014201415142317000202070ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_executor.h * pglogical replication plugin * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_executor.h * *------------------------------------------------------------------------- */ #ifndef PGLOGICAL_EXECUTOR_H #define PGLOGICAL_EXECUTOR_H #include "executor/executor.h" extern List *pglogical_truncated_tables; extern EState *create_estate_for_relation(Relation rel, bool forwrite); extern ExprContext *prepare_per_tuple_econtext(EState *estate, TupleDesc tupdesc); extern ExprState *pglogical_prepare_row_filter(Node *row_filter); extern void pglogical_executor_init(void); #endif /* 
PGLOGICAL_EXECUTOR_H */ pglogical-REL2_4_1/pglogical_fe.c000066400000000000000000000145141415142317000167460ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical.c * pglogical utility functions shared between backend and frontend * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical.c * *------------------------------------------------------------------------- */ #include "libpq-fe.h" #include "postgres_fe.h" /* Note the order is important for debian here. */ #if !defined(pg_attribute_printf) /* GCC and XLC support format attributes */ #if defined(__GNUC__) || defined(__IBMC__) #define pg_attribute_format_arg(a) __attribute__((format_arg(a))) #define pg_attribute_printf(f,a) __attribute__((format(PG_PRINTF_ATTRIBUTE, f, a))) #else #define pg_attribute_format_arg(a) #define pg_attribute_printf(f,a) #endif #endif #include "pqexpbuffer.h" #include "pglogical_fe.h" static char *PQconninfoParamsToConnstr(const char *const * keywords, const char *const * values); static void appendPQExpBufferConnstrValue(PQExpBuffer buf, const char *str); /* * Find another program in our binary's directory, * and return its version. 
*/ int find_other_exec_version(const char *argv0, const char *target, uint32 *version, char *retpath) { char cmd[MAXPGPATH]; char cmd_output[1024]; FILE *output; int pre_dot = 0, post_dot = 0; if (find_my_exec(argv0, retpath) < 0) return -1; /* Trim off program name and keep just directory */ *last_dir_separator(retpath) = '\0'; canonicalize_path(retpath); /* Now append the other program's name */ snprintf(retpath + strlen(retpath), MAXPGPATH - strlen(retpath), "/%s%s", target, EXE); /* And request the version from the program */ snprintf(cmd, sizeof(cmd), "\"%s\" --version", retpath); if ((output = popen(cmd, "r")) == NULL) { fprintf(stderr, "find_other_exec_version: couldn't open cmd: %s\n", strerror(errno)); return -1; } if (fgets(cmd_output, sizeof(cmd_output), output) == NULL) { int ret = pclose(output); if (WIFEXITED(ret)) fprintf(stderr, "find_other_exec_version: couldn't read output of \"%s\": %d (exited with return code %d)\n", cmd, ret, WEXITSTATUS(ret)); else if (WIFSIGNALED(ret)) fprintf(stderr, "find_other_exec_version: couldn't read output of \"%s\": %d (exited with signal %d)\n", cmd, ret, WTERMSIG(ret)); else fprintf(stderr, "find_other_exec_version: couldn't read output of \"%s\": %d\n", cmd, ret); return -1; } pclose(output); if (sscanf(cmd_output, "%*s %*s %d.%d", &pre_dot, &post_dot) < 1) { fprintf(stderr, "find_other_exec_version: couldn't scan result \"%s\" as version\n", cmd_output); return -2; } /* similar to version number exposed by "server_version_num" but without the minor : 9.6.1 -> 90601 -> 90600 10.1 -> 100001 -> 100000) */ *version = (pre_dot < 10) ? (pre_dot * 100 + post_dot) * 100 : pre_dot * 100 * 100; return 0; } /* * Build connection string from individual parameter. 
* * dbname can be specified in connstr parameter */ char * pgl_get_connstr(char *connstr, char *dbname, char *options, char **errmsg) { char *ret; int argcount = 1; /* dbname */ int i; const char **keywords; const char **values; PQconninfoOption *conn_opts = NULL; PQconninfoOption *conn_opt; /* * Merge the connection info inputs given in form of connection string * and options */ i = 0; if (connstr && (strncmp(connstr, "postgresql://", 13) == 0 || strncmp(connstr, "postgres://", 11) == 0 || strchr(connstr, '=') != NULL)) { conn_opts = PQconninfoParse(connstr, errmsg); if (conn_opts == NULL) return NULL; for (conn_opt = conn_opts; conn_opt->keyword != NULL; conn_opt++) { if (conn_opt->val != NULL && conn_opt->val[0] != '\0') argcount++; } keywords = malloc((argcount + 2) * sizeof(*keywords)); memset(keywords, 0, (argcount + 2) * sizeof(*keywords)); values = malloc((argcount + 2) * sizeof(*values)); memset(values, 0, (argcount + 2) * sizeof(*values)); for (conn_opt = conn_opts; conn_opt->keyword != NULL; conn_opt++) { /* If db* parameters were provided, we'll fill them later. */ if (dbname && strcmp(conn_opt->keyword, "dbname") == 0) continue; if (conn_opt->val != NULL && conn_opt->val[0] != '\0') { keywords[i] = conn_opt->keyword; values[i] = conn_opt->val; i++; } } } else { keywords = malloc((argcount + 2) * sizeof(*keywords)); memset(keywords, 0, (argcount + 2) * sizeof(*keywords)); values = malloc((argcount + 2) * sizeof(*values)); memset(values, 0, (argcount + 2) * sizeof(*values)); /* * If connstr was provided but it's not in connection string format and * the dbname wasn't provided then connstr is actually dbname. */ if (connstr && !dbname) dbname = connstr; } if (dbname) { keywords[i] = "dbname"; values[i] = dbname; i++; } if (options) { keywords[i] = "options"; values[i] = options; } ret = PQconninfoParamsToConnstr(keywords, values); /* Connection ok! 
*/ if (values) free(values); free(keywords); if (conn_opts) PQconninfoFree(conn_opts); return ret; } /* * Convert PQconninfoOption array into conninfo string */ static char * PQconninfoParamsToConnstr(const char *const * keywords, const char *const * values) { PQExpBuffer retbuf = createPQExpBuffer(); char *ret; int i = 0; for (i = 0; keywords[i] != NULL; i++) { if (i > 0) appendPQExpBufferChar(retbuf, ' '); appendPQExpBuffer(retbuf, "%s=", keywords[i]); appendPQExpBufferConnstrValue(retbuf, values[i]); } ret = strdup(retbuf->data); destroyPQExpBuffer(retbuf); return ret; } /* * Escape connection info value */ static void appendPQExpBufferConnstrValue(PQExpBuffer buf, const char *str) { const char *s; bool needquotes; /* * If the string consists entirely of plain ASCII characters, no need to * quote it. This is quite conservative, but better safe than sorry. */ needquotes = false; for (s = str; *s; s++) { if (!((*s >= 'a' && *s <= 'z') || (*s >= 'A' && *s <= 'Z') || (*s >= '0' && *s <= '9') || *s == '_' || *s == '.')) { needquotes = true; break; } } if (needquotes) { appendPQExpBufferChar(buf, '\''); while (*str) { /* ' and \ must be escaped by to \' and \\ */ if (*str == '\'' || *str == '\\') appendPQExpBufferChar(buf, '\\'); appendPQExpBufferChar(buf, *str); str++; } appendPQExpBufferChar(buf, '\''); } else appendPQExpBufferStr(buf, str); } pglogical-REL2_4_1/pglogical_fe.h000066400000000000000000000011571415142317000167520ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_fe.h * pglogical replication plugin * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_fe.h * *------------------------------------------------------------------------- */ #ifndef PGLOGICAL_FE_H #define PGLOGICAL_FE_H extern int find_other_exec_version(const char *argv0, const char *target, uint32 *version, char *retpath); extern char *pgl_get_connstr(char *connstr, char *dbname, char 
*options, char **errmsg); #endif /* PGLOGICAL_FE_H */ pglogical-REL2_4_1/pglogical_functions.c000066400000000000000000001746551415142317000204010ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_functions.c * pglogical SQL visible interfaces * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_functions.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "access/commit_ts.h" #include "access/genam.h" #include "access/heapam.h" #include "access/htup_details.h" #include "access/sysattr.h" #include "access/xact.h" #include "access/xlog.h" #include "catalog/catalog.h" #include "catalog/heap.h" #include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/pg_type.h" #include "commands/dbcommands.h" #include "commands/event_trigger.h" #include "commands/trigger.h" #include "executor/spi.h" #include "funcapi.h" #include "miscadmin.h" #include "nodes/makefuncs.h" #include "pgtime.h" #include "parser/parse_coerce.h" #include "parser/parse_collate.h" #include "parser/parse_expr.h" #include "parser/parse_relation.h" #include "replication/origin.h" #include "replication/reorderbuffer.h" #include "replication/slot.h" #include "storage/ipc.h" #include "storage/latch.h" #include "storage/proc.h" #include "tcop/tcopprot.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/catcache.h" #include "utils/fmgroids.h" #include "utils/inval.h" #include "utils/json.h" #include "utils/guc.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/rel.h" #include "utils/snapmgr.h" #include "pgstat.h" #include "pglogical_dependency.h" #include "pglogical_node.h" #include "pglogical_executor.h" #include "pglogical_queue.h" #include "pglogical_relcache.h" #include "pglogical_repset.h" #include "pglogical_rpc.h" #include "pglogical_sync.h" #include "pglogical_worker.h" #include 
"pglogical.h" /* Node management. */ PG_FUNCTION_INFO_V1(pglogical_create_node); PG_FUNCTION_INFO_V1(pglogical_drop_node); PG_FUNCTION_INFO_V1(pglogical_alter_node_add_interface); PG_FUNCTION_INFO_V1(pglogical_alter_node_drop_interface); /* Subscription management. */ PG_FUNCTION_INFO_V1(pglogical_create_subscription); PG_FUNCTION_INFO_V1(pglogical_drop_subscription); PG_FUNCTION_INFO_V1(pglogical_alter_subscription_interface); PG_FUNCTION_INFO_V1(pglogical_alter_subscription_disable); PG_FUNCTION_INFO_V1(pglogical_alter_subscription_enable); PG_FUNCTION_INFO_V1(pglogical_alter_subscription_add_replication_set); PG_FUNCTION_INFO_V1(pglogical_alter_subscription_remove_replication_set); PG_FUNCTION_INFO_V1(pglogical_alter_subscription_synchronize); PG_FUNCTION_INFO_V1(pglogical_alter_subscription_resynchronize_table); PG_FUNCTION_INFO_V1(pglogical_show_subscription_table); PG_FUNCTION_INFO_V1(pglogical_show_subscription_status); PG_FUNCTION_INFO_V1(pglogical_wait_for_subscription_sync_complete); PG_FUNCTION_INFO_V1(pglogical_wait_for_table_sync_complete); /* Replication set manipulation. 
*/ PG_FUNCTION_INFO_V1(pglogical_create_replication_set); PG_FUNCTION_INFO_V1(pglogical_alter_replication_set); PG_FUNCTION_INFO_V1(pglogical_drop_replication_set); PG_FUNCTION_INFO_V1(pglogical_replication_set_add_table); PG_FUNCTION_INFO_V1(pglogical_replication_set_add_all_tables); PG_FUNCTION_INFO_V1(pglogical_replication_set_remove_table); PG_FUNCTION_INFO_V1(pglogical_replication_set_add_sequence); PG_FUNCTION_INFO_V1(pglogical_replication_set_add_all_sequences); PG_FUNCTION_INFO_V1(pglogical_replication_set_remove_sequence); /* Other manipulation function */ PG_FUNCTION_INFO_V1(pglogical_synchronize_sequence); /* DDL */ PG_FUNCTION_INFO_V1(pglogical_replicate_ddl_command); PG_FUNCTION_INFO_V1(pglogical_queue_truncate); PG_FUNCTION_INFO_V1(pglogical_truncate_trigger_add); PG_FUNCTION_INFO_V1(pglogical_dependency_check_trigger); /* Internal utils */ PG_FUNCTION_INFO_V1(pglogical_gen_slot_name); PG_FUNCTION_INFO_V1(pglogical_node_info); PG_FUNCTION_INFO_V1(pglogical_show_repset_table_info); PG_FUNCTION_INFO_V1(pglogical_table_data_filtered); /* Information */ PG_FUNCTION_INFO_V1(pglogical_version); PG_FUNCTION_INFO_V1(pglogical_version_num); PG_FUNCTION_INFO_V1(pglogical_min_proto_version); PG_FUNCTION_INFO_V1(pglogical_max_proto_version); PG_FUNCTION_INFO_V1(pglogical_xact_commit_timestamp_origin); /* Compatibility for upgrading */ PG_FUNCTION_INFO_V1(pglogical_show_repset_table_info_by_target); static void gen_slot_name(Name slot_name, char *dbname, const char *provider_name, const char *subscriber_name); bool in_pglogical_replicate_ddl_command = false; static PGLogicalLocalNode * check_local_node(bool for_update) { PGLogicalLocalNode *node; node = get_local_node(for_update, true); if (!node) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("current database is not configured as pglogical node"), errhint("create pglogical node first"))); return node; } /* * Create new node */ Datum pglogical_create_node(PG_FUNCTION_ARGS) { char 
*node_name = NameStr(*PG_GETARG_NAME(0)); char *node_dsn = text_to_cstring(PG_GETARG_TEXT_PP(1)); PGLogicalNode node; PGlogicalInterface nodeif; PGLogicalRepSet repset; node.id = InvalidOid; node.name = node_name; create_node(&node); nodeif.id = InvalidOid; nodeif.name = node.name; nodeif.nodeid = node.id; nodeif.dsn = node_dsn; create_node_interface(&nodeif); /* Create predefined repsets. */ repset.id = InvalidOid; repset.nodeid = node.id; repset.name = DEFAULT_REPSET_NAME; repset.replicate_insert = true; repset.replicate_update = true; repset.replicate_delete = true; repset.replicate_truncate = true; create_replication_set(&repset); repset.id = InvalidOid; repset.nodeid = node.id; repset.name = DEFAULT_INSONLY_REPSET_NAME; repset.replicate_insert = true; repset.replicate_update = false; repset.replicate_delete = false; repset.replicate_truncate = true; create_replication_set(&repset); repset.id = InvalidOid; repset.nodeid = node.id; repset.name = DDL_SQL_REPSET_NAME; repset.replicate_insert = true; repset.replicate_update = false; repset.replicate_delete = false; repset.replicate_truncate = false; create_replication_set(&repset); create_local_node(node.id, nodeif.id); PG_RETURN_OID(node.id); } /* * Drop the named node. * * TODO: support cascade (drop subscribers) */ Datum pglogical_drop_node(PG_FUNCTION_ARGS) { char *node_name = NameStr(*PG_GETARG_NAME(0)); bool ifexists = PG_GETARG_BOOL(1); PGLogicalNode *node; node = get_node_by_name(node_name, ifexists); if (node != NULL) { PGLogicalLocalNode *local_node; List *osubs; List *tsubs; osubs = get_node_subscriptions(node->id, true); tsubs = get_node_subscriptions(node->id, false); if (list_length(osubs) != 0 || list_length(tsubs) != 0) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("cannot drop node \"%s\" because it still has subscriptions associated with it", node_name), errhint("drop the subscriptions first"))); /* If the node is local node, drop the record as well. 
*/ local_node = get_local_node(true, true); if (local_node && local_node->node->id == node->id) { int res; /* * Also drop all the slots associated with the node. * * We do this via SPI mainly because ReplicationSlotCtl is not * accessible on Windows. */ SPI_connect(); PG_TRY(); { res = SPI_execute("SELECT pg_catalog.pg_drop_replication_slot(slot_name)" " FROM pg_catalog.pg_replication_slots" " WHERE (plugin = 'pglogical_output' OR plugin = 'pglogical')" " AND database = current_database()" " AND slot_name ~ 'pgl_.*'", false, 0); } PG_CATCH(); { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("cannot drop node \"%s\" because one or more replication slots for the node are still active", node_name), errhint("drop the subscriptions connected to the node first"))); } PG_END_TRY(); if (res != SPI_OK_SELECT) elog(ERROR, "SPI query failed: %d", res); SPI_finish(); /* And drop the local node association as well. */ drop_local_node(); } /* Drop all the interfaces. */ drop_node_interfaces(node->id); /* Drop replication sets associated with the node. */ drop_node_replication_sets(node->id); /* Drop the node itself. */ drop_node(node->id); } PG_RETURN_BOOL(node != NULL); } /* * Add interface to a node. 
*/ Datum pglogical_alter_node_add_interface(PG_FUNCTION_ARGS) { char *node_name = NameStr(*PG_GETARG_NAME(0)); char *if_name = NameStr(*PG_GETARG_NAME(1)); char *if_dsn = text_to_cstring(PG_GETARG_TEXT_PP(2)); PGLogicalNode *node; PGlogicalInterface *oldif, newif; node = get_node_by_name(node_name, false); if (node == NULL) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("node \"%s\" not found", node_name))); oldif = get_node_interface_by_name(node->id, if_name, true); if (oldif != NULL) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("node \"%s\" already has interface named \"%s\"", node_name, if_name))); newif.id = InvalidOid; newif.name = if_name; newif.nodeid = node->id; newif.dsn = if_dsn; create_node_interface(&newif); PG_RETURN_OID(newif.id); } /* * Drop interface from a node. */ Datum pglogical_alter_node_drop_interface(PG_FUNCTION_ARGS) { char *node_name = NameStr(*PG_GETARG_NAME(0)); char *if_name = NameStr(*PG_GETARG_NAME(1)); PGLogicalNode *node; PGlogicalInterface *oldif; List *other_subs; ListCell *lc; node = get_node_by_name(node_name, false); if (node == NULL) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("node \"%s\" not found", node_name))); oldif = get_node_interface_by_name(node->id, if_name, true); if (oldif == NULL) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("interface \"%s\" for node node \"%s\" not found", if_name, node_name))); other_subs = get_node_subscriptions(node->id, true); foreach (lc, other_subs) { PGLogicalSubscription *sub = (PGLogicalSubscription *) lfirst(lc); if (oldif->id == sub->origin_if->id) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("cannot drop interface \"%s\" for node \"%s\" because subscription \"%s\" is using it", oldif->name, node->name, sub->name), errhint("change the subscription interface first"))); } drop_node_interface(oldif->id); PG_RETURN_BOOL(true); } /* * Connect two existing nodes. 
*/
/*
 * Create a subscription from the local node to a remote provider node.
 *
 * Validates connectivity (plain and replication mode) to the provider and
 * to the local node, creates (or reuses) the local catalog representation
 * of the remote node and its interface, checks that no existing
 * subscription to the same node overlaps in replication sets, then writes
 * the subscription and its initial sync status.  Returns the new
 * subscription's OID.
 */
Datum
pglogical_create_subscription(PG_FUNCTION_ARGS)
{
    char       *sub_name = NameStr(*PG_GETARG_NAME(0));
    char       *provider_dsn = text_to_cstring(PG_GETARG_TEXT_PP(1));
    ArrayType  *rep_set_names = PG_GETARG_ARRAYTYPE_P(2);
    bool        sync_structure = PG_GETARG_BOOL(3);
    bool        sync_data = PG_GETARG_BOOL(4);
    ArrayType  *forward_origin_names = PG_GETARG_ARRAYTYPE_P(5);
    Interval   *apply_delay = PG_GETARG_INTERVAL_P(6);
    bool        force_text_transfer = PG_GETARG_BOOL(7);
    PGconn     *conn;
    PGLogicalSubscription sub;
    PGLogicalSyncStatus sync;
    PGLogicalNode origin;
    PGLogicalNode *existing_origin;
    PGlogicalInterface originif;
    PGLogicalLocalNode *localnode;
    PGlogicalInterface targetif;
    List       *replication_sets;
    List       *other_subs;
    ListCell   *lc;
    NameData    slot_name;

    /* Check that this is actually a node. */
    localnode = get_local_node(true, false);

    /* Now, fetch info about remote node. */
    conn = pglogical_connect(provider_dsn, sub_name, "create");
    pglogical_remote_node_info(conn, &origin.id, &origin.name, NULL, NULL, NULL);
    PQfinish(conn);

    /* Check that we can connect remotely also in replication mode. */
    conn = pglogical_connect_replica(provider_dsn, sub_name, "create");
    PQfinish(conn);

    /* Check that local connection works. */
    conn = pglogical_connect(localnode->node_if->dsn, sub_name, "create");
    PQfinish(conn);

    /*
     * Check for existing local representation of remote node and interface
     * and lock it if it already exists.
     */
    existing_origin = get_node_by_name(origin.name, true);

    /*
     * If not found, create local representation of remote node and
     * interface.
     */
    if (!existing_origin)
    {
        create_node(&origin);

        originif.id = InvalidOid;
        originif.name = origin.name;
        originif.nodeid = origin.id;
        originif.dsn = provider_dsn;
        create_node_interface(&originif);
    }
    else
    {
        PGlogicalInterface *existingif;

        /*
         * NOTE(review): the interface is looked up under the node's own
         * name here; presumably the default interface is named after the
         * node — confirm against create-node path.
         */
        existingif = get_node_interface_by_name(origin.id, origin.name, false);
        if (strcmp(existingif->dsn, provider_dsn) != 0)
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                     errmsg("dsn \"%s\" points to existing node \"%s\" with different dsn \"%s\"",
                            provider_dsn, origin.name, existingif->dsn)));

        memcpy(&originif, existingif, sizeof(PGlogicalInterface));
    }

    /*
     * Check for overlapping replication sets.
     *
     * Note that we can't use exclusion constraints as we use the
     * subscriptions table in same manner as system catalog.
     */
    replication_sets = textarray_to_list(rep_set_names);
    other_subs = get_node_subscriptions(originif.nodeid, true);
    foreach (lc, other_subs)
    {
        PGLogicalSubscription *esub = (PGLogicalSubscription *) lfirst(lc);
        ListCell   *esetcell;

        foreach (esetcell, esub->replication_sets)
        {
            char       *existingset = lfirst(esetcell);
            ListCell   *nsetcell;

            foreach (nsetcell, replication_sets)
            {
                char       *newset = lfirst(nsetcell);

                if (strcmp(newset, existingset) == 0)
                    ereport(ERROR,
                            (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                             errmsg("existing subscription \"%s\" to node "
                                    "\"%s\" already subscribes to replication "
                                    "set \"%s\"", esub->name, origin.name,
                                    newset)));
            }
        }
    }

    /*
     * Create the subscription.
     *
     * Note for now we don't care much about the target interface so we fake
     * it here to be invalid.
     */
    targetif.id = localnode->node_if->id;
    targetif.nodeid = localnode->node->id;
    sub.id = InvalidOid;
    sub.name = sub_name;
    sub.origin_if = &originif;
    sub.target_if = &targetif;
    sub.replication_sets = replication_sets;
    sub.forward_origins = textarray_to_list(forward_origin_names);
    sub.enabled = true;
    /* Slot name is derived from database, origin node and subscription. */
    gen_slot_name(&slot_name, get_database_name(MyDatabaseId),
                  origin.name, sub_name);
    sub.slot_name = pstrdup(NameStr(slot_name));
    sub.apply_delay = apply_delay;
    sub.force_text_transfer = force_text_transfer;

    create_subscription(&sub);

    /* Create synchronization status for the subscription. */
    memset(&sync, 0, sizeof(PGLogicalSyncStatus));

    /* Map the two sync flags onto the single sync-kind code. */
    if (sync_structure && sync_data)
        sync.kind = SYNC_KIND_FULL;
    else if (sync_structure)
        sync.kind = SYNC_KIND_STRUCTURE;
    else if (sync_data)
        sync.kind = SYNC_KIND_DATA;
    else
        sync.kind = SYNC_KIND_INIT;

    sync.subid = sub.id;
    sync.status = SYNC_STATUS_INIT;
    create_local_sync_status(&sync);

    PG_RETURN_OID(sub.id);
}

/*
 * Remove subscription.
 *
 * Drops the subscription's sync status and catalog entry, kills and waits
 * for the apply worker, then best-effort drops the remote slot and the
 * local replication origin.
 */
Datum
pglogical_drop_subscription(PG_FUNCTION_ARGS)
{
    char       *sub_name = NameStr(*PG_GETARG_NAME(0));
    bool        ifexists = PG_GETARG_BOOL(1);
    PGLogicalSubscription *sub;

    sub = get_subscription_by_name(sub_name, ifexists);

    if (sub != NULL)
    {
        PGLogicalWorker *apply;
        List       *other_subs;
        PGLogicalLocalNode *node;

        node = get_local_node(true, false);

        /* First drop the status. */
        drop_subscription_sync_status(sub->id);

        /* Drop the actual subscription. */
        drop_subscription(sub->id);

        /*
         * The rest is different depending on if we are doing this on provider
         * or subscriber.
         *
         * For now on provider we just exit (there should be no records
         * of subscribers on their provider node).
         */
        if (sub->origin->id == node->node->id)
            PG_RETURN_BOOL(sub != NULL);

        /*
         * If the provider node record existed only for the dropped
         * subscription, it should be dropped as well.
         */
        other_subs = get_node_subscriptions(sub->origin->id, true);
        if (list_length(other_subs) == 0)
        {
            drop_node_interfaces(sub->origin->id);
            drop_node(sub->origin->id);
        }

        /* Kill the apply to unlock the resources. */
        LWLockAcquire(PGLogicalCtx->lock, LW_EXCLUSIVE);
        apply = pglogical_apply_find(MyDatabaseId, sub->id);
        pglogical_worker_kill(apply);
        LWLockRelease(PGLogicalCtx->lock);

        /* Wait for the apply to die. */
        for (;;)
        {
            int         rc;

            LWLockAcquire(PGLogicalCtx->lock, LW_EXCLUSIVE);
            apply = pglogical_apply_find(MyDatabaseId, sub->id);
            if (!pglogical_worker_running(apply))
            {
                LWLockRelease(PGLogicalCtx->lock);
                break;
            }
            LWLockRelease(PGLogicalCtx->lock);

            CHECK_FOR_INTERRUPTS();

            /* Poll once a second until the worker is gone. */
            rc = WaitLatch(&MyProc->procLatch,
                           WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
                           1000L);

            if (rc & WL_POSTMASTER_DEATH)
                proc_exit(1);

            ResetLatch(&MyProc->procLatch);
        }

        /*
         * Drop the slot on remote side.
         *
         * Note, we can't fail here since we can't assume that the remote node
         * is still reachable or even alive.
         */
        PG_TRY();
        {
            PGconn     *origin_conn = pglogical_connect(sub->origin_if->dsn,
                                                        sub->name, "cleanup");
            pglogical_drop_remote_slot(origin_conn, sub->slot_name);
            PQfinish(origin_conn);
        }
        PG_CATCH();
        {
            /* Swallow the error; dropping the remote slot is best-effort. */
            FlushErrorState();
            elog(WARNING, "could not drop slot \"%s\" on provider, you will probably have to drop it manually",
                 sub->slot_name);
        }
        PG_END_TRY();

        /* Drop the origin tracking locally. */
        replorigin_drop_by_name(sub->slot_name, true, false);
    }

    PG_RETURN_BOOL(sub != NULL);
}

/*
 * Disable subscription.
 *
 * With immediate = true the apply worker is killed right away; that mode
 * cannot run inside a transaction block.
 */
Datum
pglogical_alter_subscription_disable(PG_FUNCTION_ARGS)
{
    char       *sub_name = NameStr(*PG_GETARG_NAME(0));
    bool        immediate = PG_GETARG_BOOL(1);
    PGLogicalSubscription *sub = get_subscription_by_name(sub_name, false);

    /* XXX: Only used for locking purposes. */
    (void) get_local_node(true, false);

    sub->enabled = false;

    alter_subscription(sub);

    if (immediate)
    {
        PGLogicalWorker *apply;

        if ((IsTransactionBlock() || IsSubTransaction()))
            ereport(ERROR,
                    (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
                     errmsg("alter_subscription_disable with immediate = true "
                            "cannot be run inside a transaction block")));

        LWLockAcquire(PGLogicalCtx->lock, LW_EXCLUSIVE);
        apply = pglogical_apply_find(MyDatabaseId, sub->id);
        pglogical_worker_kill(apply);
        LWLockRelease(PGLogicalCtx->lock);
    }

    PG_RETURN_BOOL(true);
}

/*
 * Enable subscription.
 */
Datum
pglogical_alter_subscription_enable(PG_FUNCTION_ARGS)
{
    char       *sub_name = NameStr(*PG_GETARG_NAME(0));
    bool        immediate = PG_GETARG_BOOL(1);
    PGLogicalSubscription *sub = get_subscription_by_name(sub_name, false);

    /* XXX: Only used for locking purposes. */
    (void) get_local_node(true, false);

    sub->enabled = true;

    alter_subscription(sub);

    /*
     * There is nothing more to immediate here than running it outside of
     * transaction.
     */
    if (immediate && (IsTransactionBlock() || IsSubTransaction()))
    {
        ereport(ERROR,
                (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
                 errmsg("alter_subscription_enable with immediate = true "
                        "cannot be run inside a transaction block")));
    }

    PG_RETURN_BOOL(true);
}

/*
 * Switch interface the subscription is using.
 *
 * Returns false when the subscription already uses the named interface.
 */
Datum
pglogical_alter_subscription_interface(PG_FUNCTION_ARGS)
{
    char       *sub_name = NameStr(*PG_GETARG_NAME(0));
    char       *if_name = NameStr(*PG_GETARG_NAME(1));
    PGLogicalSubscription *sub = get_subscription_by_name(sub_name, false);
    PGlogicalInterface *new_if;

    /* XXX: Only used for locking purposes. */
    (void) get_local_node(true, false);

    new_if = get_node_interface_by_name(sub->origin->id, if_name, false);

    if (new_if->id == sub->origin_if->id)
        PG_RETURN_BOOL(false);

    sub->origin_if = new_if;
    alter_subscription(sub);

    PG_RETURN_BOOL(true);
}

/*
 * Add replication set to subscription.
*/
/*
 * Add a replication set to an existing subscription.
 *
 * Returns false (no-op) if the subscription already contains the set.
 */
Datum
pglogical_alter_subscription_add_replication_set(PG_FUNCTION_ARGS)
{
    char       *sub_name = NameStr(*PG_GETARG_NAME(0));
    char       *repset_name = NameStr(*PG_GETARG_NAME(1));
    PGLogicalSubscription *sub = get_subscription_by_name(sub_name, false);
    ListCell   *lc;

    foreach (lc, sub->replication_sets)
    {
        char       *rs = (char *) lfirst(lc);

        if (strcmp(rs, repset_name) == 0)
            PG_RETURN_BOOL(false);
    }

    sub->replication_sets = lappend(sub->replication_sets, repset_name);
    alter_subscription(sub);

    PG_RETURN_BOOL(true);
}

/*
 * Remove replication set from subscription.
 *
 * Returns true when a set was removed, false when the subscription did not
 * contain the named set.  The list-deletion API changed in PG 13, hence the
 * version-conditional loop shapes.
 */
Datum
pglogical_alter_subscription_remove_replication_set(PG_FUNCTION_ARGS)
{
    char       *sub_name = NameStr(*PG_GETARG_NAME(0));
    char       *repset_name = NameStr(*PG_GETARG_NAME(1));
    PGLogicalSubscription *sub = get_subscription_by_name(sub_name, false);
    ListCell   *lc;
#if PG_VERSION_NUM < 130000
    ListCell   *next;
    ListCell   *prev = NULL;
#endif

#if PG_VERSION_NUM >= 130000
    foreach(lc, sub->replication_sets)
#else
    for (lc = list_head(sub->replication_sets); lc; lc = next)
#endif
    {
        char       *rs = (char *) lfirst(lc);

#if PG_VERSION_NUM < 130000
        /* We might delete the cell so advance it now. */
        next = lnext(lc);
#endif

        if (strcmp(rs, repset_name) == 0)
        {
#if PG_VERSION_NUM >= 130000
            sub->replication_sets = foreach_delete_current(sub->replication_sets,
                                                           lc);
#else
            sub->replication_sets = list_delete_cell(sub->replication_sets,
                                                     lc, prev);
#endif
            alter_subscription(sub);

            PG_RETURN_BOOL(true);
        }
#if PG_VERSION_NUM < 130000
        prev = lc;
#endif
    }

    PG_RETURN_BOOL(false);
}

/*
 * Synchronize all the missing tables.
 *
 * Compares the provider's replicated table list with the local sync
 * statuses: missing tables get a new INIT sync status (optionally after a
 * local truncate), leftover local statuses are dropped.
 */
Datum
pglogical_alter_subscription_synchronize(PG_FUNCTION_ARGS)
{
    char       *sub_name = NameStr(*PG_GETARG_NAME(0));
    bool        truncate = PG_GETARG_BOOL(1);
    PGLogicalSubscription *sub = get_subscription_by_name(sub_name, false);
    PGconn     *conn;
    List       *remote_tables;
    List       *local_tables;
    ListCell   *lc;

    /* Read table list from provider. */
    conn = pglogical_connect(sub->origin_if->dsn, sub_name, "sync");
    remote_tables = pg_logical_get_remote_repset_tables(conn,
                                                        sub->replication_sets);
    PQfinish(conn);

    local_tables = get_subscription_tables(sub->id);

    /* Compare with sync status on subscription. And add missing ones. */
    foreach (lc, remote_tables)
    {
        PGLogicalRemoteRel *remoterel = lfirst(lc);
        PGLogicalSyncStatus *oldsync = NULL;
#if PG_VERSION_NUM < 130000
        ListCell   *prev = NULL;
        ListCell   *next;
#endif
        ListCell   *llc;

#if PG_VERSION_NUM >= 130000
        foreach(llc, local_tables)
#else
        for (llc = list_head(local_tables); llc; llc = next)
#endif
        {
            PGLogicalSyncStatus *tablesync = (PGLogicalSyncStatus *) lfirst(llc);

#if PG_VERSION_NUM < 130000
            /* We might delete the cell so advance it now. */
            next = lnext(llc);
#endif

            /* Match on schema + relation name against the remote rel. */
            if (namestrcmp(&tablesync->nspname, remoterel->nspname) == 0 &&
                namestrcmp(&tablesync->relname, remoterel->relname) == 0)
            {
                oldsync = tablesync;
#if PG_VERSION_NUM >= 130000
                local_tables = foreach_delete_current(local_tables, llc);
#else
                local_tables = list_delete_cell(local_tables, llc, prev);
#endif
                break;
            }
            else
            {
#if PG_VERSION_NUM < 130000
                prev = llc;
#endif
            }
        }

        /* Not yet tracked locally: create INIT sync status for it. */
        if (!oldsync)
        {
            PGLogicalSyncStatus newsync;

            memset(&newsync, 0, sizeof(PGLogicalSyncStatus));
            newsync.kind = SYNC_KIND_DATA;
            newsync.subid = sub->id;
            namestrcpy(&newsync.nspname, remoterel->nspname);
            namestrcpy(&newsync.relname, remoterel->relname);
            newsync.status = SYNC_STATUS_INIT;
            create_local_sync_status(&newsync);

            if (truncate)
                truncate_table(remoterel->nspname, remoterel->relname);
        }
    }

    /*
     * Any leftover local tables should not be replicated, remove the status
     * for them.
     */
    foreach (lc, local_tables)
    {
        PGLogicalSyncStatus *tablesync = (PGLogicalSyncStatus *) lfirst(lc);

        drop_table_sync_status_for_sub(tablesync->subid,
                                       NameStr(tablesync->nspname),
                                       NameStr(tablesync->relname));
    }

    /* Tell apply to re-read sync statuses. */
    pglogical_subscription_changed(sub->id, false);

    PG_RETURN_BOOL(true);
}

/*
 * Resynchronize one existing table.
 *
 * Resets (or creates) the table's sync status to INIT so the apply worker
 * re-copies it; errors if a synchronization is already in progress.
 */
Datum
pglogical_alter_subscription_resynchronize_table(PG_FUNCTION_ARGS)
{
    char       *sub_name = NameStr(*PG_GETARG_NAME(0));
    Oid         reloid = PG_GETARG_OID(1);
    bool        truncate = PG_GETARG_BOOL(2);
    PGLogicalSubscription *sub = get_subscription_by_name(sub_name, false);
    PGLogicalSyncStatus *oldsync;
    Relation    rel;
    char       *nspname,
               *relname;

    rel = table_open(reloid, AccessShareLock);

    nspname = get_namespace_name(RelationGetNamespace(rel));
    relname = RelationGetRelationName(rel);

    /* Reset sync status of the table. */
    oldsync = get_table_sync_status(sub->id, nspname, relname, true);
    if (oldsync)
    {
        /* Only restart sync from a settled state. */
        if (oldsync->status != SYNC_STATUS_READY &&
            oldsync->status != SYNC_STATUS_SYNCDONE &&
            oldsync->status != SYNC_STATUS_NONE)
            elog(ERROR, "table %s.%s is already being synchronized",
                 nspname, relname);

        set_table_sync_status(sub->id, nspname, relname, SYNC_STATUS_INIT,
                              InvalidXLogRecPtr);
    }
    else
    {
        PGLogicalSyncStatus newsync;

        memset(&newsync, 0, sizeof(PGLogicalSyncStatus));
        newsync.kind = SYNC_KIND_DATA;
        newsync.subid = sub->id;
        namestrcpy(&newsync.nspname, nspname);
        namestrcpy(&newsync.relname, relname);
        newsync.status = SYNC_STATUS_INIT;
        create_local_sync_status(&newsync);
    }

    table_close(rel, NoLock);

    if (truncate)
        truncate_table(nspname, relname);

    /* Tell apply to re-read sync statuses. */
    pglogical_subscription_changed(sub->id, false);

    PG_RETURN_BOOL(true);
}

/*
 * Synchronize one sequence.
 */
Datum
pglogical_synchronize_sequence(PG_FUNCTION_ARGS)
{
    Oid         reloid = PG_GETARG_OID(0);

    /* Check that this is actually a node. */
    (void) get_local_node(true, false);

    synchronize_sequence(reloid);

    PG_RETURN_BOOL(true);
}

/*
 * Map an internal sync-status code to a human-readable label.
 */
static char *
sync_status_to_string(char status)
{
    switch (status)
    {
        case SYNC_STATUS_INIT:
            return "sync_init";
        case SYNC_STATUS_STRUCTURE:
            return "sync_structure";
        case SYNC_STATUS_DATA:
            return "sync_data";
        case SYNC_STATUS_CONSTRAINTS:
            return "sync_constraints";
        case SYNC_STATUS_SYNCWAIT:
            return "sync_waiting";
        case SYNC_STATUS_CATCHUP:
            return "catchup";
        case SYNC_STATUS_SYNCDONE:
            return "synchronized";
        case SYNC_STATUS_READY:
            return "replicating";
        default:
            return "unknown";
    }
}

/*
 * Show info about one table.
 *
 * Returns a (nspname, relname, status) tuple describing the table's sync
 * state within the given subscription.
 */
Datum
pglogical_show_subscription_table(PG_FUNCTION_ARGS)
{
    char       *sub_name = NameStr(*PG_GETARG_NAME(0));
    Oid         reloid = PG_GETARG_OID(1);
    PGLogicalSubscription *sub = get_subscription_by_name(sub_name, false);
    char       *nspname;
    char       *relname;
    PGLogicalSyncStatus *sync;
    char       *sync_status;

    TupleDesc   tupdesc;
    Datum       values[3];
    bool        nulls[3];
    HeapTuple   result_tuple;

    tupdesc = CreateTemplateTupleDesc(3);
    TupleDescInitEntry(tupdesc, (AttrNumber) 1, "nspname", TEXTOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 2, "relname", TEXTOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 3, "status", TEXTOID, -1, 0);
    tupdesc = BlessTupleDesc(tupdesc);

    nspname = get_namespace_name(get_rel_namespace(reloid));
    relname = get_rel_name(reloid);

    /* Fetch sync status of the table (missing_ok = true). */
    sync = get_table_sync_status(sub->id, nspname, relname, true);
    if (sync)
        sync_status = sync_status_to_string(sync->status);
    else
        sync_status = "unknown";

    memset(values, 0, sizeof(values));
    memset(nulls, 0, sizeof(nulls));

    values[0] = CStringGetTextDatum(nspname);
    values[1] = CStringGetTextDatum(relname);
    values[2] = CStringGetTextDatum(sync_status);

    result_tuple = heap_form_tuple(tupdesc, values, nulls);
    PG_RETURN_DATUM(HeapTupleGetDatum(result_tuple));
}

/*
 * Show info about subscription.
*/
/*
 * Set-returning function: status of one subscription (by name) or of all
 * subscriptions of the local node (NULL argument).  Returns rows of
 * (name, status, provider node, provider dsn, slot, replication sets,
 * forward origins) via a materialized tuplestore.
 */
Datum
pglogical_show_subscription_status(PG_FUNCTION_ARGS)
{
    List       *subscriptions;
    ListCell   *lc;
    ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
    TupleDesc   tupdesc;
    Tuplestorestate *tupstore;
    PGLogicalLocalNode *node;
    MemoryContext per_query_ctx;
    MemoryContext oldcontext;

    /* check to see if caller supports us returning a tuplestore */
    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("set-valued function called in context that cannot accept a set")));
    if (!(rsinfo->allowedModes & SFRM_Materialize))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("materialize mode required, but it is not " \
                        "allowed in this context")));

    node = check_local_node(false);

    if (PG_ARGISNULL(0))
    {
        subscriptions = get_node_subscriptions(node->node->id, false);
    }
    else
    {
        PGLogicalSubscription *sub;
        sub = get_subscription_by_name(NameStr(*PG_GETARG_NAME(0)), false);
        subscriptions = list_make1(sub);
    }

    /* Switch into long-lived context to construct returned data structures */
    per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
    oldcontext = MemoryContextSwitchTo(per_query_ctx);

    /* Build a tuple descriptor for our result type */
    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
        elog(ERROR, "return type must be a row type");

    tupstore = tuplestore_begin_heap(true, false, work_mem);
    rsinfo->returnMode = SFRM_Materialize;
    rsinfo->setResult = tupstore;
    rsinfo->setDesc = tupdesc;

    MemoryContextSwitchTo(oldcontext);

    foreach (lc, subscriptions)
    {
        PGLogicalSubscription *sub = lfirst(lc);
        PGLogicalWorker *apply;
        Datum       values[7];
        bool        nulls[7];
        char       *status;

        memset(values, 0, sizeof(values));
        memset(nulls, 0, sizeof(nulls));

        /* Derive status from the live apply worker + sync state. */
        LWLockAcquire(PGLogicalCtx->lock, LW_EXCLUSIVE);
        apply = pglogical_apply_find(MyDatabaseId, sub->id);
        if (pglogical_worker_running(apply))
        {
            PGLogicalSyncStatus *sync;
            sync = get_subscription_sync_status(sub->id, true);

            if (!sync)
                status = "unknown";
            else if (sync->status == SYNC_STATUS_READY)
                status = "replicating";
            else
                status = "initializing";
        }
        else if (!sub->enabled)
            status = "disabled";
        else
            status = "down";
        LWLockRelease(PGLogicalCtx->lock);

        values[0] = CStringGetTextDatum(sub->name);
        values[1] = CStringGetTextDatum(status);
        values[2] = CStringGetTextDatum(sub->origin->name);
        values[3] = CStringGetTextDatum(sub->origin_if->dsn);
        values[4] = CStringGetTextDatum(sub->slot_name);
        if (sub->replication_sets)
            values[5] =
                PointerGetDatum(strlist_to_textarray(sub->replication_sets));
        else
            nulls[5] = true;
        if (sub->forward_origins)
            values[6] =
                PointerGetDatum(strlist_to_textarray(sub->forward_origins));
        else
            nulls[6] = true;

        tuplestore_putvalues(tupstore, tupdesc, values, nulls);
    }

    tuplestore_donestoring(tupstore);

    PG_RETURN_VOID();
}

/*
 * Create new replication set.
 */
Datum
pglogical_create_replication_set(PG_FUNCTION_ARGS)
{
    PGLogicalRepSet repset;
    PGLogicalLocalNode *node;

    node = check_local_node(true);

    repset.id = InvalidOid;

    repset.nodeid = node->node->id;
    repset.name = NameStr(*PG_GETARG_NAME(0));

    repset.replicate_insert = PG_GETARG_BOOL(1);
    repset.replicate_update = PG_GETARG_BOOL(2);
    repset.replicate_delete = PG_GETARG_BOOL(3);
    repset.replicate_truncate = PG_GETARG_BOOL(4);

    create_replication_set(&repset);

    PG_RETURN_OID(repset.id);
}

/*
 * Alter existing replication set.
*/ Datum pglogical_alter_replication_set(PG_FUNCTION_ARGS) { PGLogicalRepSet *repset; PGLogicalLocalNode *node; if (PG_ARGISNULL(0)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("set_name cannot be NULL"))); node = check_local_node(true); repset = get_replication_set_by_name(node->node->id, NameStr(*PG_GETARG_NAME(0)), false); if (!PG_ARGISNULL(1)) repset->replicate_insert = PG_GETARG_BOOL(1); if (!PG_ARGISNULL(2)) repset->replicate_update = PG_GETARG_BOOL(2); if (!PG_ARGISNULL(3)) repset->replicate_delete = PG_GETARG_BOOL(3); if (!PG_ARGISNULL(4)) repset->replicate_truncate = PG_GETARG_BOOL(4); alter_replication_set(repset); PG_RETURN_OID(repset->id); } /* * Drop existing replication set. */ Datum pglogical_drop_replication_set(PG_FUNCTION_ARGS) { char *set_name = NameStr(*PG_GETARG_NAME(0)); bool ifexists = PG_GETARG_BOOL(1); PGLogicalRepSet *repset; PGLogicalLocalNode *node; node = check_local_node(true); repset = get_replication_set_by_name(node->node->id, set_name, ifexists); if (repset != NULL) drop_replication_set(repset->id); PG_RETURN_BOOL(repset != NULL); } /* * error context callback for parse failure during pglogical_replication_set_add_table() */ static void add_table_parser_error_callback(void *arg) { const char *row_filter_str = (const char *) arg; errcontext("invalid row_filter expression \"%s\"", row_filter_str); /* * Currently we just suppress any syntax error position report, rather * than transforming to an "internal query" error. It's unlikely that a * type name is complex enough to need positioning. 
*/ errposition(0); } static Node * parse_row_filter(Relation rel, char *row_filter_str) { Node *row_filter = NULL; List *raw_parsetree_list; SelectStmt *stmt; ResTarget *restarget; ParseState *pstate; char *nspname; char *relname; #if PG_VERSION_NUM >= 130000 ParseNamespaceItem *nsitem; #else RangeTblEntry *rte; #endif StringInfoData buf; ErrorContextCallback myerrcontext; nspname = get_namespace_name(RelationGetNamespace(rel)); relname = RelationGetRelationName(rel); /* * Build fake query which includes the expression so that we can * pass it to the parser. */ initStringInfo(&buf); appendStringInfo(&buf, "SELECT %s FROM %s", row_filter_str, quote_qualified_identifier(nspname, relname)); /* Parse it, providing proper error context. */ myerrcontext.callback = add_table_parser_error_callback; myerrcontext.arg = (void *) row_filter_str; myerrcontext.previous = error_context_stack; error_context_stack = &myerrcontext; raw_parsetree_list = pg_parse_query(buf.data); error_context_stack = myerrcontext.previous; /* Validate the output from the parser. 
*/ if (list_length(raw_parsetree_list) != 1) goto fail; #if PG_VERSION_NUM >= 100000 stmt = (SelectStmt *) linitial_node(RawStmt, raw_parsetree_list)->stmt; #else stmt = (SelectStmt *) linitial(raw_parsetree_list); #endif if (stmt == NULL || !IsA(stmt, SelectStmt) || stmt->distinctClause != NIL || stmt->intoClause != NULL || stmt->whereClause != NULL || stmt->groupClause != NIL || stmt->havingClause != NULL || stmt->windowClause != NIL || stmt->valuesLists != NIL || stmt->sortClause != NIL || stmt->limitOffset != NULL || stmt->limitCount != NULL || stmt->lockingClause != NIL || stmt->withClause != NULL || stmt->op != SETOP_NONE) goto fail; if (list_length(stmt->targetList) != 1) goto fail; restarget = (ResTarget *) linitial(stmt->targetList); if (restarget == NULL || !IsA(restarget, ResTarget) || restarget->name != NULL || restarget->indirection != NIL || restarget->val == NULL) goto fail; row_filter = restarget->val; /* * Create a dummy ParseState and insert the target relation as its sole * rangetable entry. We need a ParseState for transformExpr. */ pstate = make_parsestate(NULL); #if PG_VERSION_NUM >= 130000 nsitem = addRangeTableEntryForRelation(pstate, rel, AccessShareLock, NULL, false, true); addNSItemToQuery(pstate, nsitem, true, true, true); #else rte = addRangeTableEntryForRelation(pstate, rel, #if PG_VERSION_NUM >= 120000 AccessShareLock, #endif NULL, false, true); addRTEtoQuery(pstate, rte, true, true, true); #endif /* * Transform the expression and check it follows limits of row_filter * which are same as those of CHECK constraint so we can use the builtin * checks for that. * * TODO: make the errors look more informative (currently they will * complain about CHECK constraint. (Possibly add context?) 
*/ row_filter = transformExpr(pstate, row_filter, EXPR_KIND_CHECK_CONSTRAINT); row_filter = coerce_to_boolean(pstate, row_filter, "row_filter"); assign_expr_collations(pstate, row_filter); if (list_length(pstate->p_rtable) != 1) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), errmsg("only table \"%s\" can be referenced in row_filter", relname))); pfree(buf.data); return row_filter; fail: ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("invalid row_filter expression \"%s\"", row_filter_str))); return NULL; /* keep compiler quiet */ } /* * Add replication set / table mapping. */ Datum pglogical_replication_set_add_table(PG_FUNCTION_ARGS) { Name repset_name; Oid reloid; bool synchronize; Node *row_filter = NULL; List *att_list = NIL; PGLogicalRepSet *repset; Relation rel; TupleDesc tupDesc; PGLogicalLocalNode *node; char *nspname; char *relname; StringInfoData json; /* Proccess for required parameters. */ if (PG_ARGISNULL(0)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("set_name cannot be NULL"))); if (PG_ARGISNULL(1)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("relation cannot be NULL"))); if (PG_ARGISNULL(2)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("synchronize_data cannot be NULL"))); repset_name = PG_GETARG_NAME(0); reloid = PG_GETARG_OID(1); synchronize = PG_GETARG_BOOL(2); /* standard check for node. */ node = check_local_node(true); /* Find the replication set. */ repset = get_replication_set_by_name(node->node->id, NameStr(*repset_name), false); /* * Make sure the relation exists (lock mode has to be the same one as * in replication_set_add_relation). */ rel = table_open(reloid, ShareRowExclusiveLock); tupDesc = RelationGetDescr(rel); nspname = get_namespace_name(RelationGetNamespace(rel)); relname = RelationGetRelationName(rel); /* Proccess att_list. 
*/ if (!PG_ARGISNULL(3)) { ArrayType *att_names = PG_GETARG_ARRAYTYPE_P(3); ListCell *lc; Bitmapset *idattrs; /* fetch bitmap of REPLICATION IDENTITY attributes */ idattrs = RelationGetIndexAttrBitmap(rel, INDEX_ATTR_BITMAP_IDENTITY_KEY); att_list = textarray_to_list(att_names); foreach (lc, att_list) { char *attname = (char *) lfirst(lc); int attnum = get_att_num_by_name(tupDesc, attname); if (attnum < 0) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("table %s does not have column %s", quote_qualified_identifier(nspname, relname), attname))); idattrs = bms_del_member(idattrs, attnum - FirstLowInvalidHeapAttributeNumber); } if (!bms_is_empty(idattrs)) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("REPLICA IDENTITY columns must be replicated"))); } /* Proccess row_filter if any. */ if (!PG_ARGISNULL(4)) { row_filter = parse_row_filter(rel, text_to_cstring(PG_GETARG_TEXT_PP(4))); } replication_set_add_table(repset->id, reloid, att_list, row_filter); if (synchronize) { /* It's easier to construct json manually than via Jsonb API... */ initStringInfo(&json); appendStringInfo(&json, "{\"schema_name\": "); escape_json(&json, nspname); appendStringInfo(&json, ",\"table_name\": "); escape_json(&json, relname); appendStringInfo(&json, "}"); /* Queue the synchronize request for replication. */ queue_message(list_make1(repset->name), GetUserId(), QUEUE_COMMAND_TYPE_TABLESYNC, json.data); } /* Cleanup. */ table_close(rel, NoLock); PG_RETURN_BOOL(true); } /* * Add replication set / sequence mapping. */ Datum pglogical_replication_set_add_sequence(PG_FUNCTION_ARGS) { Name repset_name = PG_GETARG_NAME(0); Oid reloid = PG_GETARG_OID(1); bool synchronize = PG_GETARG_BOOL(2); PGLogicalRepSet *repset; Relation rel; PGLogicalLocalNode *node; char *nspname; char *relname; StringInfoData json; node = check_local_node(true); /* Find the replication set. 
*/ repset = get_replication_set_by_name(node->node->id, NameStr(*repset_name), false); /* * Make sure the relation exists (lock mode has to be the same one as * in replication_set_add_relation). */ rel = table_open(reloid, ShareRowExclusiveLock); replication_set_add_seq(repset->id, reloid); if (synchronize) { nspname = get_namespace_name(RelationGetNamespace(rel)); relname = RelationGetRelationName(rel); /* It's easier to construct json manually than via Jsonb API... */ initStringInfo(&json); appendStringInfo(&json, "{\"schema_name\": "); escape_json(&json, nspname); appendStringInfo(&json, ",\"sequence_name\": "); escape_json(&json, relname); appendStringInfo(&json, ",\"last_value\": \""INT64_FORMAT"\"", sequence_get_last_value(reloid)); appendStringInfo(&json, "}"); /* Add sequence to the queue. */ queue_message(list_make1(repset->name), GetUserId(), QUEUE_COMMAND_TYPE_SEQUENCE, json.data); } /* Cleanup. */ table_close(rel, NoLock); PG_RETURN_BOOL(true);} /* * Common function for adding replication set / relation mapping based on * schemas. */ static Datum pglogical_replication_set_add_all_relations(Name repset_name, ArrayType *nsp_names, bool synchronize, char relkind) { PGLogicalRepSet *repset; Relation rel; PGLogicalLocalNode *node; ListCell *lc; List *existing_relations = NIL; node = check_local_node(true); /* Find the replication set. 
*/ repset = get_replication_set_by_name(node->node->id, NameStr(*repset_name), false); existing_relations = replication_set_get_tables(repset->id); existing_relations = list_concat_unique_oid(existing_relations, replication_set_get_seqs(repset->id)); rel = table_open(RelationRelationId, RowExclusiveLock); foreach (lc, textarray_to_list(nsp_names)) { char *nspname = lfirst(lc); Oid nspoid = LookupExplicitNamespace(nspname, false); ScanKeyData skey[1]; SysScanDesc sysscan; HeapTuple tuple; ScanKeyInit(&skey[0], Anum_pg_class_relnamespace, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(nspoid)); sysscan = systable_beginscan(rel, ClassNameNspIndexId, true, NULL, 1, skey); while (HeapTupleIsValid(tuple = systable_getnext(sysscan))) { Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tuple); #if PG_VERSION_NUM < 120000 Oid reloid = HeapTupleGetOid(tuple); #else Oid reloid = reltup->oid; #endif /* * Only add logged relations which are not system relations * (catalog, toast). */ if (reltup->relkind != relkind || reltup->relpersistence != RELPERSISTENCE_PERMANENT || IsSystemClass(reloid, reltup)) continue; if (!list_member_oid(existing_relations, reloid)) { if (relkind == RELKIND_RELATION) replication_set_add_table(repset->id, reloid, NIL, NULL); else replication_set_add_seq(repset->id, reloid); if (synchronize) { char *relname; StringInfoData json; char cmdtype; relname = get_rel_name(reloid); /* It's easier to construct json manually than via Jsonb API... 
*/ initStringInfo(&json); appendStringInfo(&json, "{\"schema_name\": "); escape_json(&json, nspname); switch (relkind) { case RELKIND_RELATION: appendStringInfo(&json, ",\"table_name\": "); escape_json(&json, relname); cmdtype = QUEUE_COMMAND_TYPE_TABLESYNC; break; case RELKIND_SEQUENCE: appendStringInfo(&json, ",\"sequence_name\": "); escape_json(&json, relname); appendStringInfo(&json, ",\"last_value\": \""INT64_FORMAT"\"", sequence_get_last_value(reloid)); cmdtype = QUEUE_COMMAND_TYPE_SEQUENCE; break; default: elog(ERROR, "unsupported relkind '%c'", relkind); } appendStringInfo(&json, "}"); /* Queue the truncate for replication. */ queue_message(list_make1(repset->name), GetUserId(), cmdtype, json.data); } } } systable_endscan(sysscan); } table_close(rel, RowExclusiveLock); PG_RETURN_BOOL(true); } /* * Add replication set / table mapping based on schemas. */ Datum pglogical_replication_set_add_all_tables(PG_FUNCTION_ARGS) { Name repset_name = PG_GETARG_NAME(0); ArrayType *nsp_names = PG_GETARG_ARRAYTYPE_P(1); bool synchronize = PG_GETARG_BOOL(2); return pglogical_replication_set_add_all_relations(repset_name, nsp_names, synchronize, RELKIND_RELATION); } /* * Add replication set / sequence mapping based on schemas. */ Datum pglogical_replication_set_add_all_sequences(PG_FUNCTION_ARGS) { Name repset_name = PG_GETARG_NAME(0); ArrayType *nsp_names = PG_GETARG_ARRAYTYPE_P(1); bool synchronize = PG_GETARG_BOOL(2); return pglogical_replication_set_add_all_relations(repset_name, nsp_names, synchronize, RELKIND_SEQUENCE); } /* * Remove replication set / table mapping. * * Unlike the pglogical_replication_set_add_table, this function does not care * if table is valid or not, as we are just removing the record from repset. */ Datum pglogical_replication_set_remove_table(PG_FUNCTION_ARGS) { Oid reloid = PG_GETARG_OID(1); PGLogicalRepSet *repset; PGLogicalLocalNode *node; node = check_local_node(true); /* Find the replication set. 
*/
	repset = get_replication_set_by_name(node->node->id,
										 NameStr(*PG_GETARG_NAME(0)), false);

	replication_set_remove_table(repset->id, reloid, false);

	PG_RETURN_BOOL(true);
}

/*
 * Remove replication set / sequence mapping.
 */
Datum
pglogical_replication_set_remove_sequence(PG_FUNCTION_ARGS)
{
	Oid			seqoid = PG_GETARG_OID(1);
	PGLogicalRepSet    *repset;
	PGLogicalLocalNode *node;

	node = check_local_node(true);

	/* Find the replication set. */
	repset = get_replication_set_by_name(node->node->id,
										 NameStr(*PG_GETARG_NAME(0)), false);

	replication_set_remove_seq(repset->id, seqoid, false);

	PG_RETURN_BOOL(true);
}

/*
 * pglogical_replicate_ddl_command
 *
 * Queues the input SQL for replication.
 */
Datum
pglogical_replicate_ddl_command(PG_FUNCTION_ARGS)
{
	text	   *command = PG_GETARG_TEXT_PP(0);
	char	   *query = text_to_cstring(command);
	int			save_nestlevel;
	List	   *replication_sets;
	ListCell   *lc;
	PGLogicalLocalNode *node;
	StringInfoData cmd;

	node = check_local_node(false);

	/* XXX: This is here for backwards compatibility with pre 1.1 extension. */
	if (PG_NARGS() < 2)
	{
		/* Old signature: no repset argument, use the default DDL repset. */
		replication_sets = list_make1(DDL_SQL_REPSET_NAME);
	}
	else
	{
		ArrayType  *rep_set_names = PG_GETARG_ARRAYTYPE_P(1);
		replication_sets = textarray_to_list(rep_set_names);
	}

	/* Validate replication sets (errors out on unknown set name). */
	foreach(lc, replication_sets)
	{
		char   *setname = lfirst(lc);

		(void) get_replication_set_by_name(node->node->id, setname, false);
	}

	save_nestlevel = NewGUCNestLevel();

	/* Force everything in the query to be fully qualified. */
	(void) set_config_option("search_path", "",
							 PGC_USERSET, PGC_S_SESSION,
							 GUC_ACTION_SAVE, true, 0
#if PG_VERSION_NUM >= 90500
							 , false
#endif
							 );

	/* Convert the query to json string. */
	initStringInfo(&cmd);
	escape_json(&cmd, query);

	/* Queue the query for replication. */
	queue_message(replication_sets, GetUserId(),
				  QUEUE_COMMAND_TYPE_SQL, cmd.data);

	/*
	 * Execute the query locally.
* Use PG_TRY to ensure in_pglogical_replicate_ddl_command gets cleaned up
	 */
	in_pglogical_replicate_ddl_command = true;
	PG_TRY();
	{
		pglogical_execute_sql_command(query, GetUserNameFromId(GetUserId()
#if PG_VERSION_NUM >= 90500
											  , false
#endif
											  ), false);
	}
	PG_CATCH();
	{
		/* Reset the flag before propagating the error. */
		in_pglogical_replicate_ddl_command = false;
		PG_RE_THROW();
	}
	PG_END_TRY();
	in_pglogical_replicate_ddl_command = false;

	/*
	 * Restore the GUC variables we set above.
	 */
	AtEOXact_GUC(true, save_nestlevel);

	PG_RETURN_BOOL(true);
}

/*
 * pglogical_queue_trigger
 *
 * Trigger which queues the TRUNCATE command.
 *
 * This function only writes to internal linked list, actual queueing is done
 * by pglogical_finish_truncate().
 */
Datum
pglogical_queue_truncate(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;
	const char *funcname = "queue_truncate";
	MemoryContext oldcontext;
	PGLogicalLocalNode *local_node;

	/* Return if this function was called from apply process. */
	if (MyPGLogicalWorker)
		PG_RETURN_VOID();

	/* Make sure this is being called as an AFTER TRUNCATE trigger. */
	if (!CALLED_AS_TRIGGER(fcinfo))
		ereport(ERROR,
				(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
				 errmsg("function \"%s\" was not called by trigger manager",
						funcname)));

	if (!TRIGGER_FIRED_AFTER(trigdata->tg_event) ||
		!TRIGGER_FIRED_BY_TRUNCATE(trigdata->tg_event))
		ereport(ERROR,
				(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
				 errmsg("function \"%s\" must be fired AFTER TRUNCATE",
						funcname)));

	/* If this is not pglogical node, don't do anything. */
	local_node = get_local_node(false, true);
	if (!local_node)
		PG_RETURN_VOID();

	/*
	 * Make sure the list change survives the trigger call: the list is
	 * consumed later in the same transaction, so allocate the cell in
	 * TopTransactionContext.
	 */
	oldcontext = MemoryContextSwitchTo(TopTransactionContext);
	pglogical_truncated_tables = lappend_oid(pglogical_truncated_tables,
											 RelationGetRelid(trigdata->tg_relation));
	MemoryContextSwitchTo(oldcontext);

	PG_RETURN_VOID();
}

/*
 * pglogical_dependency_check_trigger
 *
 * No longer used, present for smoother upgrades.
*/
Datum
pglogical_dependency_check_trigger(PG_FUNCTION_ARGS)
{
	/* Intentionally a no-op; kept only so old catalog entries keep working. */
	PG_RETURN_VOID();
}

/*
 * Return basic information about the local pglogical node as a single row:
 * (node id, node name, system identifier, database name, replication sets).
 */
Datum
pglogical_node_info(PG_FUNCTION_ARGS)
{
	TupleDesc	tupdesc;
	Datum		values[5];
	bool		nulls[5];
	HeapTuple	htup;
	char		sysid[32];
	List	   *repsets;
	PGLogicalLocalNode *node;

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	tupdesc = BlessTupleDesc(tupdesc);

	/* Errors out if this database is not a pglogical node. */
	node = get_local_node(false, false);

	/* System identifier is 64-bit; render it as text for the client. */
	snprintf(sysid, sizeof(sysid), UINT64_FORMAT,
			 GetSystemIdentifier());
	repsets = get_node_replication_sets(node->node->id);

	memset(nulls, 0, sizeof(nulls));
	values[0] = ObjectIdGetDatum(node->node->id);
	values[1] = CStringGetTextDatum(node->node->name);
	values[2] = CStringGetTextDatum(sysid);
	values[3] = CStringGetTextDatum(get_database_name(MyDatabaseId));
	values[4] = CStringGetTextDatum(stringlist_to_identifierstr(repsets));

	htup = heap_form_tuple(tupdesc, values, nulls);

	PG_RETURN_DATUM(HeapTupleGetDatum(htup));
}

/*
 * Get replication info about table.
 *
 * This is called by downstream sync worker on the upstream to obtain
 * info needed to do initial synchronization correctly. Be careful
 * about changing it, as it must be upward- and downward-compatible.
*/
Datum
pglogical_show_repset_table_info(PG_FUNCTION_ARGS)
{
	Oid			reloid = PG_GETARG_OID(0);
	ArrayType  *rep_set_names = PG_GETARG_ARRAYTYPE_P(1);
	Relation	rel;
	List	   *replication_sets;
	TupleDesc	reldesc;
	TupleDesc	rettupdesc;
	int			i;
	List	   *att_list = NIL;
	Datum		values[5];
	bool		nulls[5];
	char	   *nspname;
	char	   *relname;
	HeapTuple	htup;
	PGLogicalLocalNode *node;
	PGLogicalTableRepInfo *tableinfo;

	node = get_local_node(false, false);

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &rettupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");
	rettupdesc = BlessTupleDesc(rettupdesc);

	rel = table_open(reloid, AccessShareLock);
	reldesc = RelationGetDescr(rel);
	replication_sets = textarray_to_list(rep_set_names);
	replication_sets = get_replication_sets(node->node->id,
											replication_sets,
											false);

	nspname = get_namespace_name(RelationGetNamespace(rel));
	relname = RelationGetRelationName(rel);

	/* Build the replication info for the table. */
	tableinfo = get_table_replication_info(node->node->id, rel,
										   replication_sets);

	/* Build the column list (respecting any configured column filter). */
	for (i = 0; i < reldesc->natts; i++)
	{
		Form_pg_attribute att = TupleDescAttr(reldesc,i);

		/* Skip dropped columns. */
		if (att->attisdropped)
			continue;

		/* Skip filtered columns if any. */
		if (tableinfo->att_list &&
			!bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
						   tableinfo->att_list))
			continue;

		att_list = lappend(att_list, NameStr(att->attname));
	}

	/* And now build the result.
*/
	memset(nulls, false, sizeof(nulls));
	values[0] = ObjectIdGetDatum(RelationGetRelid(rel));
	values[1] = CStringGetTextDatum(nspname);
	values[2] = CStringGetTextDatum(relname);
	values[3] = PointerGetDatum(strlist_to_textarray(att_list));
	/* Fifth column tells the subscriber whether any row filter applies. */
	values[4] = BoolGetDatum(list_length(tableinfo->row_filter) > 0);

	htup = heap_form_tuple(rettupdesc, values, nulls);

	table_close(rel, NoLock);

	PG_RETURN_DATUM(HeapTupleGetDatum(htup));
}

/*
 * Dummy function to allow upgrading through all intermediate versions
 */
Datum
pglogical_show_repset_table_info_by_target(PG_FUNCTION_ARGS)
{
	/* Never meant to be called; abort hard if it somehow is. */
	abort();
}

/*
 * Decide whether to return the tuple or not.
 *
 * Returns true only if the tuple passes every row filter expression in
 * row_filter_list; a NULL filter result counts as false.
 */
static bool
filter_tuple(HeapTuple htup, ExprContext *econtext, List *row_filter_list)
{
	ListCell	   *lc;

	ExecStoreHeapTuple(htup, econtext->ecxt_scantuple, false);

	foreach (lc, row_filter_list)
	{
		ExprState  *exprstate = (ExprState *) lfirst(lc);
		Datum		res;
		bool		isnull;

		res = ExecEvalExpr(exprstate, econtext, &isnull, NULL);

		/* NULL is same as false for our use. */
		if (isnull)
			return false;

		if (!DatumGetBool(res))
			return false;
	}

	return true;
}

/*
 * Do sequential table scan and return all rows that pass the row filter(s)
 * defined in specified replication set(s) for a table.
 *
 * This is called by downstream sync worker on the upstream to obtain
 * filtered data for initial COPY.
 */
Datum
pglogical_table_data_filtered(PG_FUNCTION_ARGS)
{
	Oid			argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
	Oid			reloid;
	ArrayType  *rep_set_names;
	ReturnSetInfo *rsi;
	Relation	rel;
	List	   *replication_sets;
	ListCell   *lc;
	TupleDesc	tupdesc;
	TupleDesc	reltupdesc;
	TableScanDesc scandesc;
	HeapTuple	htup;
	List	   *row_filter_list = NIL;
	EState	   *estate;
	ExprContext *econtext;
	Tuplestorestate *tupstore;
	PGLogicalLocalNode *node;
	PGLogicalTableRepInfo *tableinfo;
	MemoryContext per_query_ctx;
	MemoryContext oldcontext;

	node = get_local_node(false, false);

	/* Validate parameter.
*/
	if (PG_ARGISNULL(1))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("relation cannot be NULL")));
	if (PG_ARGISNULL(2))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("repsets cannot be NULL")));

	reloid = PG_GETARG_OID(1);
	rep_set_names = PG_GETARG_ARRAYTYPE_P(2);

	/* First argument only carries the output row type; it must be composite. */
	if (!type_is_rowtype(argtype))
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("first argument of %s must be a row type",
						"pglogical_table_data_filtered")));

	rsi = (ReturnSetInfo *) fcinfo->resultinfo;

	/* This is a set-returning function; caller must accept materialized sets. */
	if (!rsi || !IsA(rsi, ReturnSetInfo) ||
		(rsi->allowedModes & SFRM_Materialize) == 0 ||
		rsi->expectedDesc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that "
						"cannot accept a set")));

	/* Switch into long-lived context to construct returned data structures */
	per_query_ctx = rsi->econtext->ecxt_per_query_memory;
	oldcontext = MemoryContextSwitchTo(per_query_ctx);

	/*
	 * get the tupdesc from the result set info - it must be a record type
	 * because we already checked that arg1 is a record type, or we're in a
	 * to_record function which returns a setof record.
	 */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("function returning record called in context "
						"that cannot accept type record")));
	tupdesc = BlessTupleDesc(tupdesc);

	/* Prepare output tuple store. */
	tupstore = tuplestore_begin_heap(false, false, work_mem);
	rsi->returnMode = SFRM_Materialize;
	rsi->setResult = tupstore;
	rsi->setDesc = tupdesc;

	MemoryContextSwitchTo(oldcontext);

	/* Check output type and table row type are the same. */
	rel = table_open(reloid, AccessShareLock);
	reltupdesc = RelationGetDescr(rel);

	if (!equalTupleDescs(tupdesc, reltupdesc))
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("return type of %s must be same as row type of the relation",
						"pglogical_table_data_filtered")));

	/* Build the replication info for the table.
*/
	replication_sets = textarray_to_list(rep_set_names);
	replication_sets = get_replication_sets(node->node->id,
											replication_sets,
											false);
	tableinfo = get_table_replication_info(node->node->id, rel,
										   replication_sets);

	/* Prepare executor. */
	estate = create_estate_for_relation(rel, false);
	econtext = prepare_per_tuple_econtext(estate, reltupdesc);

	/* Prepare the row filter expression(s), one ExprState per filter. */
	foreach (lc, tableinfo->row_filter)
	{
		Node	   *row_filter = (Node *) lfirst(lc);
		ExprState  *exprstate = pglogical_prepare_row_filter(row_filter);

		row_filter_list = lappend(row_filter_list, exprstate);
	}

	/* Scan the table, keeping only tuples that pass every row filter. */
	scandesc = table_beginscan(rel, GetActiveSnapshot(), 0, NULL);
	while (HeapTupleIsValid(htup = heap_getnext(scandesc, ForwardScanDirection)))
	{
		if (!filter_tuple(htup, econtext, row_filter_list))
			continue;

		tuplestore_puttuple(tupstore, htup);
	}

	/* Cleanup. */
	ExecDropSingleTupleTableSlot(econtext->ecxt_scantuple);
	FreeExecutorState(estate);

	heap_endscan(scandesc);

	table_close(rel, NoLock);

	/* Result rows were returned via the tuplestore (SFRM_Materialize). */
	PG_RETURN_NULL();
}

/*
 * Wait for subscription and initial sync to complete, or, if relation info is
 * given, for sync to complete for a specific table.
 *
 * We have to play games with snapshots to achieve this, since we're looking at
 * pglogical tables in the future as far as our snapshot is concerned.
 */
static void
pglogical_wait_for_sync_complete(char *subscription_name, char *relnamespace,
								 char *relname)
{
	PGLogicalSubscription *sub;

	/*
	 * If we wait in SERIALIZABLE, then the next snapshot after we return
	 * won't reflect the new state.
*/
	if (IsolationUsesXactSnapshot())
		elog(ERROR, "cannot wait for sync in REPEATABLE READ or SERIALIZABLE isolation");

	sub = get_subscription_by_name(subscription_name, false);

	/* Poll sync status until done; 200ms latch-based backoff between checks. */
	do
	{
		PGLogicalSyncStatus *subsync;
		List	   *tables;
		bool		isdone = false;
		int			rc;

		/* We need to see the latest rows */
		PushActiveSnapshot(GetLatestSnapshot());

		subsync = get_subscription_sync_status(sub->id, true);
		isdone = subsync && subsync->status == SYNC_STATUS_READY;
		free_sync_status(subsync);

		if (isdone)
		{
			/*
			 * Subscription itself is synced, but what about separately
			 * synced tables?
			 */
			if (relname != NULL)
			{
				/* Waiting for one specific table only. */
				PGLogicalSyncStatus *table = get_table_sync_status(sub->id, relnamespace, relname, false);
				isdone = table && table->status == SYNC_STATUS_READY;
				free_sync_status(table);
			}
			else
			{
				/*
				 * XXX This is plenty inefficient and we should probably just do a direct catalog
				 * scan, but meh, it hardly has to be fast.
				 */
				ListCell *lc;

				tables = get_unsynced_tables(sub->id);
				isdone = tables == NIL;

				foreach (lc, tables)
				{
					PGLogicalSyncStatus *table = lfirst(lc);
					free_sync_status(table);
				}
				list_free(tables);
			}
		}

		PopActiveSnapshot();

		if (isdone)
			break;

		CHECK_FOR_INTERRUPTS();

		/* some kind of backoff could be useful here */
		rc = WaitLatch(&MyProc->procLatch,
					   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, 200L);

		if (rc & WL_POSTMASTER_DEATH)
			proc_exit(1);

		ResetLatch(&MyProc->procLatch);
	} while (1);
}

/*
 * SQL-callable: block until the whole subscription reports sync complete.
 */
Datum
pglogical_wait_for_subscription_sync_complete(PG_FUNCTION_ARGS)
{
	char	   *subscription_name = NameStr(*PG_GETARG_NAME(0));

	pglogical_wait_for_sync_complete(subscription_name, NULL, NULL);

	PG_RETURN_VOID();
}

/*
 * SQL-callable: block until one specific table reports sync complete.
 */
Datum
pglogical_wait_for_table_sync_complete(PG_FUNCTION_ARGS)
{
	char	   *subscription_name = NameStr(*PG_GETARG_NAME(0));
	Oid			relid = PG_GETARG_OID(1);
	char	   *relname, *relnamespace;

	relname = get_rel_name(relid);
	relnamespace = get_namespace_name(get_rel_namespace(relid));

	pglogical_wait_for_sync_complete(subscription_name, relnamespace, relname);

	PG_RETURN_VOID();
}

/*
 * Like pg_xact_commit_timestamp
but extended for replorigin
 * too.
 */
Datum
pglogical_xact_commit_timestamp_origin(PG_FUNCTION_ARGS)
{
#ifdef HAVE_REPLICATION_ORIGINS
	TransactionId xid = PG_GETARG_UINT32(0);
	TimestampTz ts;
	RepOriginId	origin;
	bool		found;
#endif
	TupleDesc	tupdesc;
	Datum		values[2];
	bool		nulls[2] = {false, false};
	HeapTuple	tup;

	/*
	 * Construct a tuple descriptor for the result row. Must match the
	 * function declaration.
	 */
	tupdesc = CreateTemplateTupleDesc(2);
	TupleDescInitEntry(tupdesc, (AttrNumber) 1, "timestamp",
					   TIMESTAMPTZOID, -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 2, "roident",
					   OIDOID, -1, 0);
	tupdesc = BlessTupleDesc(tupdesc);

#ifdef HAVE_REPLICATION_ORIGINS
	found = TransactionIdGetCommitTsData(xid, &ts, &origin);

	if (found)
	{
		values[0] = TimestampTzGetDatum(ts);
		values[1] = ObjectIdGetDatum(origin);
	}
	else
#endif
	{
		/* No commit-ts data (or built without replication origins): NULLs. */
		values[0] = (Datum)0;
		nulls[0] = true;
		values[1] = (Datum)0;
		nulls[1] = true;
	}

	tup = heap_form_tuple(tupdesc, values, nulls);

	PG_RETURN_DATUM(HeapTupleGetDatum(tup));
}

/*
 * SQL-callable wrapper around gen_slot_name() below.
 */
Datum
pglogical_gen_slot_name(PG_FUNCTION_ARGS)
{
	char	   *dbname = NameStr(*PG_GETARG_NAME(0));
	char	   *provider_node_name = NameStr(*PG_GETARG_NAME(1));
	char	   *subscription_name = NameStr(*PG_GETARG_NAME(2));
	Name		slot_name;

	slot_name = (Name) palloc0(NAMEDATALEN);

	gen_slot_name(slot_name, dbname, provider_node_name,
				  subscription_name);

	PG_RETURN_NAME(slot_name);
}

/*
 * Generate slot name (used also for origin identifier)
 *
 * The current format is:
 * pgl_<dbname>_<provider node name>_<subscription name>
 * (each component shortened/hashed to at most 16 chars)
 *
 * Note that we want to leave enough free space for 8 bytes of suffix
 * which in practice means 9 bytes including the underscore.
*/
static void
gen_slot_name(Name slot_name, char *dbname, const char *provider_node,
			  const char *subscription_name)
{
	char	   *cp;

	memset(NameStr(*slot_name), 0, NAMEDATALEN);
	snprintf(NameStr(*slot_name), NAMEDATALEN,
			 "pgl_%s_%s_%s",
			 shorten_hash(dbname, 16),
			 shorten_hash(provider_node, 16),
			 shorten_hash(subscription_name, 16));
	/* snprintf NUL-terminates, but be explicit about the last byte anyway. */
	NameStr(*slot_name)[NAMEDATALEN-1] = '\0';

	/* Replace all the invalid characters in slot name with underscore. */
	for (cp = NameStr(*slot_name); *cp; cp++)
	{
		if (!((*cp >= 'a' && *cp <= 'z')
			  || (*cp >= '0' && *cp <= '9')
			  || (*cp == '_')))
		{
			*cp = '_';
		}
	}
}

/* Return the human-readable pglogical version string. */
Datum
pglogical_version(PG_FUNCTION_ARGS)
{
	PG_RETURN_TEXT_P(cstring_to_text(PGLOGICAL_VERSION));
}

/* Return the numeric pglogical version. */
Datum
pglogical_version_num(PG_FUNCTION_ARGS)
{
	PG_RETURN_INT32(PGLOGICAL_VERSION_NUM);
}

/* Highest replication protocol version this build speaks. */
Datum
pglogical_max_proto_version(PG_FUNCTION_ARGS)
{
	PG_RETURN_INT32(PGLOGICAL_MAX_PROTO_VERSION_NUM);
}

/* Lowest replication protocol version this build speaks. */
Datum
pglogical_min_proto_version(PG_FUNCTION_ARGS)
{
	PG_RETURN_INT32(PGLOGICAL_MIN_PROTO_VERSION_NUM);
}

/* Dummy functions for backward compatibility.
*/
Datum
pglogical_truncate_trigger_add(PG_FUNCTION_ARGS)
{
	/* No-op: kept so old extension versions' catalog entries still resolve. */
	PG_RETURN_VOID();
}

PGDLLEXPORT extern Datum pglogical_hooks_setup(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(pglogical_hooks_setup);

Datum
pglogical_hooks_setup(PG_FUNCTION_ARGS)
{
	/* No-op: kept for backward compatibility. */
	PG_RETURN_VOID();
}
pglogical-REL2_4_1/pglogical_manager.c000066400000000000000000000134621415142317000177670ustar00rootroot00000000000000/*-------------------------------------------------------------------------
 *
 * pglogical_manager.c
 * 		pglogical worker for managing apply workers in a database
 *
 * Copyright (c) 2015, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *		pglogical_manager.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "miscadmin.h"

#include "access/xact.h"
#include "commands/dbcommands.h"
#include "commands/extension.h"
#include "storage/ipc.h"
#include "storage/proc.h"
#include "utils/memutils.h"
#include "utils/resowner.h"
#include "utils/timestamp.h"
#include "pgstat.h"

#include "pglogical_node.h"
#include "pglogical_worker.h"
#include "pglogical.h"

/* Main-loop sleep bounds, in milliseconds. */
#define INITIAL_SLEEP 10000L
#define MAX_SLEEP 180000L
#define MIN_SLEEP 5000L

void pglogical_manager_main(Datum main_arg);

/*
 * Manage the apply workers - start new ones, kill old ones.
 *
 * Returns false when some crashed worker is still inside its restart
 * back-off window (i.e. there is more to do soon), true otherwise.
 */
static bool
manage_apply_workers(void)
{
	PGLogicalLocalNode *node;
	List	   *subscriptions;
	List	   *workers;
	List	   *subs_to_start = NIL;
	ListCell   *slc,
			   *wlc;
	bool		ret = true;

	/* Get list of existing workers. */
	LWLockAcquire(PGLogicalCtx->lock, LW_EXCLUSIVE);
	workers = pglogical_apply_find_all(MyPGLogicalWorker->dboid);
	LWLockRelease(PGLogicalCtx->lock);

	StartTransactionCommand();

	/* Get local node, exit if none found. */
	node = get_local_node(true, true);
	if (!node)
		proc_exit(0);

	/* Get list of subscribers. */
	subscriptions = get_node_subscriptions(node->node->id, false);

	/* Check for active workers for each subscription.
*/
	foreach (slc, subscriptions)
	{
		PGLogicalSubscription  *sub = (PGLogicalSubscription *) lfirst(slc);
		PGLogicalWorker		   *apply = NULL;
#if PG_VERSION_NUM < 130000
		ListCell			   *next;
		ListCell			   *prev = NULL;
#endif

		/*
		 * Skip if subscriber not enabled.
		 * This must be called before the following search loop because
		 * we want to kill any workers for disabled subscribers.
		 */
		if (!sub->enabled)
			continue;

		/*
		 * Check if the subscriber already has registered worker.  A matched
		 * worker is removed from 'workers'; whatever remains in that list
		 * afterwards is killed below.
		 */
#if PG_VERSION_NUM >= 130000
		foreach(wlc, workers)
#else
		for (wlc = list_head(workers); wlc; wlc = next)
#endif
		{
			apply = (PGLogicalWorker *) lfirst(wlc);

#if PG_VERSION_NUM < 130000
			/* We might delete the cell so advance it now. */
			next = lnext(wlc);
#endif

			if (apply->worker.apply.subid == sub->id)
			{
#if PG_VERSION_NUM >= 130000
				workers = foreach_delete_current(workers, wlc);
#else
				workers = list_delete_cell(workers, wlc, prev);
#endif
				break;
			}
			else
			{
#if PG_VERSION_NUM < 130000
				prev = wlc;
#endif
			}
		}

		/* If the subscriber does not have a registered worker. */
		if (!wlc)
			apply = NULL;

		/* Skip if the worker was already registered. */
		if (pglogical_worker_running(apply))
			continue;

		/* Check if this is crashed worker and if we want to restart it now.
*/
		if (apply)
		{
			if (apply->crashed_at != 0)
			{
				TimestampTz	restart_time;

				/* Enforce a MIN_SLEEP back-off before restarting a crashed worker. */
				restart_time = TimestampTzPlusMilliseconds(apply->crashed_at,
														   MIN_SLEEP);
				if (restart_time > GetCurrentTimestamp())
				{
					ret = false;
					continue;
				}
			}
			else
			{
				/* Worker slot exists but isn't crashed/running yet; retry later. */
				ret = false;
				continue;
			}
		}

		subs_to_start = lappend(subs_to_start, sub);
	}

	/* Register a fresh apply worker for every subscription that needs one. */
	foreach (slc, subs_to_start)
	{
		PGLogicalSubscription  *sub = (PGLogicalSubscription *) lfirst(slc);
		PGLogicalWorker			apply;

		memset(&apply, 0, sizeof(PGLogicalWorker));
		apply.worker_type = PGLOGICAL_WORKER_APPLY;
		apply.dboid = MyPGLogicalWorker->dboid;
		apply.worker.apply.subid = sub->id;
		apply.worker.apply.sync_pending = true;
		apply.worker.apply.replay_stop_lsn = InvalidXLogRecPtr;

		pglogical_worker_register(&apply);
	}

	CommitTransactionCommand();

	/* Kill any remaining running workers that should not be running. */
	LWLockAcquire(PGLogicalCtx->lock, LW_EXCLUSIVE);
	foreach (wlc, workers)
	{
		PGLogicalWorker	   *worker = (PGLogicalWorker *) lfirst(wlc);
		pglogical_worker_kill(worker);

		/* Cleanup old info about crashed apply workers. */
		if (worker && worker->crashed_at != 0)
		{
			elog(DEBUG2, "cleaning pglogical worker slot %zu",
				 (worker - &PGLogicalCtx->workers[0]));
			worker->worker_type = PGLOGICAL_WORKER_NONE;
			worker->crashed_at = 0;
		}
	}
	LWLockRelease(PGLogicalCtx->lock);

	return ret;
}

/*
 * Entry point for manager worker.
 */
void
pglogical_manager_main(Datum main_arg)
{
	int			slot = DatumGetInt32(main_arg);
	Oid			extoid;
	int			sleep_timer = INITIAL_SLEEP;

	/* Setup shmem. */
	pglogical_worker_attach(slot, PGLOGICAL_WORKER_MANAGER);

	/* Establish signal handlers. */
	pqsignal(SIGTERM, handle_sigterm);

	CurrentResourceOwner = ResourceOwnerCreate(NULL, "pglogical manager");

	StartTransactionCommand();

	/* If the extension is not installed in this DB, exit.
*/
	extoid = get_extension_oid(EXTENSION_NAME, true);
	if (!OidIsValid(extoid))
		proc_exit(0);

	elog(LOG, "starting pglogical database manager for database %s",
		 get_database_name(MyDatabaseId));

	CommitTransactionCommand();

	/* Use separate transaction to avoid lock escalation. */
	StartTransactionCommand();
	pglogical_manage_extension();
	CommitTransactionCommand();

	/* Main wait loop. */
	while (!got_SIGTERM)
	{
		int		rc;
		bool	processed_all;

		/* Launch the apply workers. */
		processed_all = manage_apply_workers();

		/*
		 * Handle sequences and update our sleep timer as necessary:
		 * back off (double) while sequences needed syncing, otherwise
		 * shrink towards MIN_SLEEP.
		 */
		if (synchronize_sequences())
			sleep_timer = Min(sleep_timer * 2, MAX_SLEEP);
		else
			sleep_timer = Max(sleep_timer / 2, MIN_SLEEP);

		rc = WaitLatch(&MyProc->procLatch,
					   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
					   processed_all ? sleep_timer : MIN_SLEEP);

		ResetLatch(&MyProc->procLatch);

		/* emergency bailout if postmaster has died */
		if (rc & WL_POSTMASTER_DEATH)
			proc_exit(1);

		CHECK_FOR_INTERRUPTS();
	}

	proc_exit(0);
}
pglogical-REL2_4_1/pglogical_monitoring.c000066400000000000000000000054471415142317000205440ustar00rootroot00000000000000/*-------------------------------------------------------------------------
 *
 * pglogical_monitoring.c
 * 		support for monitoring and progress tracking
 *
 * Copyright (c) 2017, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *		pglogical_monitoring.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "fmgr.h"
#include "miscadmin.h"

#include "replication/slot.h"

#include "utils/pg_lsn.h"

#include "storage/ipc.h"
#include "storage/proc.h"

#include "pgstat.h"

#include "pglogical.h"

PG_FUNCTION_INFO_V1(pglogical_wait_slot_confirm_lsn);

/*
 * Wait for the confirmed_flush_lsn of the specified slot, or all logical slots
 * if none given, to pass the supplied value. If no position is supplied the
 * write position is used.
 *
 * No timeout is offered, use a statement_timeout.
*/
Datum
pglogical_wait_slot_confirm_lsn(PG_FUNCTION_ARGS)
{
	XLogRecPtr	target_lsn;
	Name		slot_name;
	int			i;

	if (PG_ARGISNULL(0))
		slot_name = NULL;	/* NULL name means: wait on all slots */
	else
		slot_name = PG_GETARG_NAME(0);

	if (PG_ARGISNULL(1))
	{
		/* Default target: last local commit, or current insert position. */
		if (XLogRecPtrIsInvalid(XactLastCommitEnd))
			target_lsn = GetXLogInsertRecPtr();
		else
			target_lsn = XactLastCommitEnd;
	}
	else
		target_lsn = PG_GETARG_LSN(1);

	elog(DEBUG1, "waiting for %s to pass confirmed_flush position %X/%X",
		 slot_name == NULL ? "all local slots" : NameStr(*slot_name),
		 (uint32)(target_lsn>>32), (uint32)target_lsn);

	/* Poll once per second until the oldest matching slot reaches the target. */
	do
	{
		XLogRecPtr	oldest_confirmed_lsn = InvalidXLogRecPtr;
		int			oldest_slot_pos = -1;
		int			rc;

		LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
		for (i = 0; i < max_replication_slots; i++)
		{
			ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];

			if (!s->in_use)
				continue;

			if (slot_name != NULL &&
				strncmp(NameStr(*slot_name), NameStr(s->data.name), NAMEDATALEN) != 0)
				continue;

			/* Track the slot with the oldest (smallest) confirmed_flush. */
			if (oldest_confirmed_lsn == InvalidXLogRecPtr ||
				(s->data.confirmed_flush != InvalidXLogRecPtr &&
				 s->data.confirmed_flush < oldest_confirmed_lsn))
			{
				oldest_confirmed_lsn = s->data.confirmed_flush;
				oldest_slot_pos = i;
			}
		}

		if (oldest_slot_pos >= 0)
			elog(DEBUG2, "oldest confirmed lsn is %X/%X on slot '%s', %u bytes left until %X/%X",
				 (uint32)(oldest_confirmed_lsn>>32), (uint32)oldest_confirmed_lsn,
				 NameStr(ReplicationSlotCtl->replication_slots[oldest_slot_pos].data.name),
				 (uint32)(target_lsn - oldest_confirmed_lsn),
				 (uint32)(target_lsn>>32), (uint32)target_lsn);

		LWLockRelease(ReplicationSlotControlLock);

		if (oldest_confirmed_lsn >= target_lsn)
			break;

		rc = WaitLatch(&MyProc->procLatch,
					   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, 1000);

		ResetLatch(&MyProc->procLatch);

		if (rc & WL_POSTMASTER_DEATH)
			proc_exit(1);

		CHECK_FOR_INTERRUPTS();
	} while(1);

	PG_RETURN_VOID();
}
pglogical-REL2_4_1/pglogical_node.c000066400000000000000000000615411415142317000173020ustar00rootroot00000000000000/*-------------------------------------------------------------------------
 *
* pglogical_node.c
 * 		pglogical node and subscription catalog manipulation functions
 *
 * TODO: caching
 *
 * Copyright (c) 2015, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *		pglogical_node.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/hash.h"
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/xact.h"

#include "catalog/indexing.h"
#include "catalog/objectaddress.h"
#include "catalog/pg_type.h"

#include "commands/dbcommands.h"

#include "miscadmin.h"

#include "nodes/makefuncs.h"

#include "utils/array.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"

#include "pglogical_node.h"
#include "pglogical_repset.h"
#include "pglogical_worker.h"
#include "pglogical.h"

/* Names of the pglogical catalog tables this file manipulates. */
#define CATALOG_NODE "node"
#define CATALOG_LOCAL_NODE "local_node"
#define CATALOG_NODE_INTERFACE "node_interface"
#define CATALOG_SUBSCRIPTION "subscription"

/* C image of the leading fixed-width columns of the "node" catalog row. */
typedef struct NodeTuple
{
	Oid			node_id;
	NameData	node_name;
} NodeTuple;

#define Natts_node 4
#define Anum_node_id 1
#define Anum_node_name 2

#define Natts_local_node 2
#define Anum_node_local_id 1
#define Anum_node_local_node_if 2

/* C image of the "node_interface" catalog row. */
typedef struct NodeInterfaceTuple
{
	Oid			if_id;
	NameData	if_name;
	Oid			if_nodeid;
	text		if_dsn;
} NodeInterfaceTuple;

#define Natts_node_inteface 4
#define Anum_if_id 1
#define Anum_if_name 2
#define Anum_if_nodeid 3
#define Anum_if_dsn 4

/* C image of the leading fixed-width columns of the "subscription" row. */
typedef struct SubscriptionTuple
{
	Oid			sub_id;
	NameData	sub_name;
	Oid			sub_origin;
	Oid			sub_target;
	Oid			sub_origin_if;
	Oid			sub_target_if;
	bool		sub_enabled;
	NameData	sub_slot_name;
} SubscriptionTuple;

#define Natts_subscription 12
#define Anum_sub_id 1
#define Anum_sub_name 2
#define Anum_sub_origin 3
#define Anum_sub_target 4
#define Anum_sub_origin_if 5
#define Anum_sub_target_if 6
#define Anum_sub_enabled 7
#define Anum_sub_slot_name 8
#define Anum_sub_replication_sets 9
#define Anum_sub_forward_origins 10
#define Anum_sub_apply_delay 11
#define Anum_sub_force_text_transfer 12

/*
 * We impose same validation rules as replication slot name validation does.
 */
static void
validate_subscription_name(const char *name)
{
	const char *cp;

	if (strlen(name) == 0)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_NAME),
				 errmsg("subscription name \"%s\" is too short", name)));
	if (strlen(name) >= NAMEDATALEN)
		ereport(ERROR,
				(errcode(ERRCODE_NAME_TOO_LONG),
				 errmsg("subscription name \"%s\" is too long", name)));

	for (cp = name; *cp; cp++)
	{
		if (!((*cp >= 'a' && *cp <= 'z')
			  || (*cp >= '0' && *cp <= '9')
			  || (*cp == '_')))
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_NAME),
					 errmsg("subscription name \"%s\" contains invalid character",
							name),
					 errhint("Subscription names may only contain lower case "
							 "letters, numbers, and the underscore character.")));
		}
	}
}

/*
 * Add new node to catalog.
 */
void
create_node(PGLogicalNode *node)
{
	RangeVar   *rv;
	Relation	rel;
	TupleDesc	tupDesc;
	HeapTuple	tup;
	Datum		values[Natts_node];
	bool		nulls[Natts_node];
	NameData	node_name;

	if (get_node_by_name(node->name, true) != NULL)
		elog(ERROR, "node %s already exists", node->name);

	/* Generate new id unless one was already specified. */
	if (node->id == InvalidOid)
		node->id = DatumGetUInt32(hash_any((const unsigned char *) node->name,
										   strlen(node->name)));

	rv = makeRangeVar(EXTENSION_NAME, CATALOG_NODE, -1);
	rel = table_openrv(rv, RowExclusiveLock);
	tupDesc = RelationGetDescr(rel);

	/* Form a tuple. */
	memset(nulls, false, sizeof(nulls));

	values[Anum_node_id - 1] = ObjectIdGetDatum(node->id);
	namestrcpy(&node_name, node->name);
	values[Anum_node_name - 1] = NameGetDatum(&node_name);

	tup = heap_form_tuple(tupDesc, values, nulls);

	/* Insert the tuple to the catalog. */
	CatalogTupleInsert(rel, tup);

	/* Cleanup. */
	heap_freetuple(tup);
	table_close(rel, NoLock);

	CommandCounterIncrement();

	/* Notify workers so they pick up the catalog change. */
	pglogical_subscription_changed(InvalidOid, false);
}

/*
 * Delete node from the catalog.
*/
void
drop_node(Oid nodeid)
{
	RangeVar   *rv;
	Relation	rel;
	SysScanDesc scan;
	HeapTuple	tuple;
	ScanKeyData key[1];

	rv = makeRangeVar(EXTENSION_NAME, CATALOG_NODE, -1);
	rel = table_openrv(rv, RowExclusiveLock);

	/* Search for node record. */
	ScanKeyInit(&key[0],
				Anum_node_id,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(nodeid));

	/* InvalidOid index => sequential scan of the catalog table. */
	scan = systable_beginscan(rel, 0, true, NULL, 1, key);
	tuple = systable_getnext(scan);

	if (!HeapTupleIsValid(tuple))
		elog(ERROR, "node %u not found", nodeid);

	/* Remove the tuple. */
	simple_heap_delete(rel, &tuple->t_self);

	/* Cleanup. */
	systable_endscan(scan);
	table_close(rel, NoLock);

	CommandCounterIncrement();

	pglogical_subscription_changed(InvalidOid, false);
}

/*
 * Build a palloc'd PGLogicalNode from a "node" catalog tuple.
 */
static PGLogicalNode *
node_fromtuple(HeapTuple tuple)
{
	NodeTuple *nodetup = (NodeTuple *) GETSTRUCT(tuple);
	PGLogicalNode *node
		= (PGLogicalNode *) palloc(sizeof(PGLogicalNode));
	node->id = nodetup->node_id;
	node->name = pstrdup(NameStr(nodetup->node_name));

	return node;
}

/*
 * Load the info for specific node.
 */
PGLogicalNode *
get_node(Oid nodeid)
{
	PGLogicalNode  *node;
	RangeVar	   *rv;
	Relation		rel;
	SysScanDesc		scan;
	HeapTuple		tuple;
	ScanKeyData		key[1];

	rv = makeRangeVar(EXTENSION_NAME, CATALOG_NODE, -1);
	rel = table_openrv(rv, RowExclusiveLock);

	/* Search for node record. */
	ScanKeyInit(&key[0],
				Anum_node_id,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(nodeid));

	scan = systable_beginscan(rel, 0, true, NULL, 1, key);
	tuple = systable_getnext(scan);

	if (!HeapTupleIsValid(tuple))
		elog(ERROR, "node %u not found", nodeid);

	node = node_fromtuple(tuple);

	systable_endscan(scan);
	table_close(rel, RowExclusiveLock);

	return node;
}

/*
 * Load the info for specific node, looked up by name.
 */
PGLogicalNode *
get_node_by_name(const char *name, bool missing_ok)
{
	PGLogicalNode  *node;
	RangeVar	   *rv;
	Relation		rel;
	SysScanDesc		scan;
	HeapTuple		tuple;
	ScanKeyData		key[1];

	rv = makeRangeVar(EXTENSION_NAME, CATALOG_NODE, -1);
	rel = table_openrv(rv, RowExclusiveLock);

	/* Search for node record.
*/
	ScanKeyInit(&key[0],
				Anum_node_name,
				BTEqualStrategyNumber, F_NAMEEQ,
				CStringGetDatum(name));

	scan = systable_beginscan(rel, 0, true, NULL, 1, key);
	tuple = systable_getnext(scan);

	if (!HeapTupleIsValid(tuple))
	{
		if (missing_ok)
		{
			/* Caller tolerates absence; clean up and report not-found. */
			systable_endscan(scan);
			table_close(rel, RowExclusiveLock);
			return NULL;
		}

		elog(ERROR, "node %s not found", name);
	}

	node = node_fromtuple(tuple);

	systable_endscan(scan);
	table_close(rel, RowExclusiveLock);

	return node;
}

/*
 * Add local node record to catalog.
 */
void
create_local_node(Oid nodeid, Oid ifid)
{
	RangeVar   *rv;
	Relation	rel;
	TupleDesc	tupDesc;
	HeapTuple	tup;
	Datum		values[Natts_local_node];
	bool		nulls[Natts_local_node];

	rv = makeRangeVar(EXTENSION_NAME, CATALOG_LOCAL_NODE, -1);
	/* AccessExclusiveLock: only one local_node row may ever exist. */
	rel = table_openrv(rv, AccessExclusiveLock);
	tupDesc = RelationGetDescr(rel);

	/* TODO: better error message */
	if (get_local_node(false, true))
		elog(ERROR, "current database is already configured as pglogical node");

	/* Form a tuple. */
	memset(nulls, false, sizeof(nulls));

	values[Anum_node_local_id - 1] = ObjectIdGetDatum(nodeid);
	values[Anum_node_local_node_if - 1] = ObjectIdGetDatum(ifid);

	tup = heap_form_tuple(tupDesc, values, nulls);

	/* Insert the tuple to the catalog. */
	CatalogTupleInsert(rel, tup);

	/* Cleanup. */
	heap_freetuple(tup);
	table_close(rel, AccessExclusiveLock);

	CommandCounterIncrement();
}

/*
 * Drop local node record from catalog.
 */
void
drop_local_node(void)
{
	RangeVar   *rv;
	Relation	rel;
	SysScanDesc scan;
	HeapTuple	tuple;

	rv = makeRangeVar(EXTENSION_NAME, CATALOG_LOCAL_NODE, -1);
	rel = table_openrv(rv, AccessExclusiveLock);

	/* Find the local node tuple (the table holds at most one row). */
	scan = systable_beginscan(rel, 0, true, NULL, 0, NULL);
	tuple = systable_getnext(scan);

	/* No local node record found. */
	if (!HeapTupleIsValid(tuple))
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("local node not found")));

	/* Remove the tuple. */
	simple_heap_delete(rel, &tuple->t_self);

	/* Cleanup.
*/ systable_endscan(scan); table_close(rel, NoLock); CommandCounterIncrement(); } /* * Return local node. */ PGLogicalLocalNode * get_local_node(bool for_update, bool missing_ok) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; TupleDesc desc; Oid nodeid; Oid nodeifid; bool isnull; PGLogicalLocalNode *res; rv = makeRangeVar(EXTENSION_NAME, CATALOG_LOCAL_NODE, -1); rel = table_openrv_extended(rv, for_update ? ShareUpdateExclusiveLock : RowExclusiveLock, true); if (!rel) { if (missing_ok) return NULL; ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("local pglogical node not found"))); } /* Find the local node tuple. */ scan = systable_beginscan(rel, 0, true, NULL, 0, NULL); tuple = systable_getnext(scan); /* No local node record found. */ if (!HeapTupleIsValid(tuple)) { if (missing_ok) { systable_endscan(scan); table_close(rel, for_update ? NoLock : RowExclusiveLock); return NULL; } ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("local pglogical node not found"))); } desc = RelationGetDescr(rel); nodeid = DatumGetObjectId(fastgetattr(tuple, Anum_node_local_id, desc, &isnull)); nodeifid = DatumGetObjectId(fastgetattr(tuple, Anum_node_local_node_if, desc, &isnull)); systable_endscan(scan); table_close(rel, for_update ? NoLock : RowExclusiveLock); res = (PGLogicalLocalNode *) palloc(sizeof(PGLogicalLocalNode)); res->node = get_node(nodeid); res->node_if = get_node_interface(nodeifid); return res; } /* * Add new node interface to catalog. */ void create_node_interface(PGlogicalInterface *nodeif) { RangeVar *rv; Relation rel; TupleDesc tupDesc; HeapTuple tup; Datum values[Natts_node_inteface]; bool nulls[Natts_node_inteface]; NameData nodeif_name; /* Generate new id unless one was already specified. 
*/ if (nodeif->id == InvalidOid) { uint32 hashinput[2]; hashinput[0] = nodeif->nodeid; hashinput[1] = DatumGetUInt32(hash_any((const unsigned char *) nodeif->name, strlen(nodeif->name))); nodeif->id = DatumGetUInt32(hash_any((const unsigned char *) hashinput, (int) sizeof(hashinput))); } rv = makeRangeVar(EXTENSION_NAME, CATALOG_NODE_INTERFACE, -1); rel = table_openrv(rv, RowExclusiveLock); tupDesc = RelationGetDescr(rel); /* Form a tuple. */ memset(nulls, false, sizeof(nulls)); values[Anum_if_id - 1] = ObjectIdGetDatum(nodeif->id); namestrcpy(&nodeif_name, nodeif->name); values[Anum_if_name - 1] = NameGetDatum(&nodeif_name); values[Anum_if_nodeid - 1] = ObjectIdGetDatum(nodeif->nodeid); values[Anum_if_dsn - 1] = CStringGetTextDatum(nodeif->dsn); tup = heap_form_tuple(tupDesc, values, nulls); /* Insert the tuple to the catalog. */ CatalogTupleInsert(rel, tup); /* Cleanup. */ heap_freetuple(tup); table_close(rel, RowExclusiveLock); CommandCounterIncrement(); } /* * Delete node interface from the catalog. */ void drop_node_interface(Oid ifid) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[1]; rv = makeRangeVar(EXTENSION_NAME, CATALOG_NODE_INTERFACE, -1); rel = table_openrv(rv, RowExclusiveLock); /* Search for node record. */ ScanKeyInit(&key[0], Anum_if_id, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(ifid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); tuple = systable_getnext(scan); if (!HeapTupleIsValid(tuple)) elog(ERROR, "node interface %u not found", ifid); /* Remove the tuple. */ simple_heap_delete(rel, &tuple->t_self); /* Cleanup. */ systable_endscan(scan); table_close(rel, NoLock); CommandCounterIncrement(); } /* * Delete all node interfaces from the catalog. 
*/
/*
 * Delete every interface registered for the given node id.
 *
 * Silently does nothing when the node has no interfaces.
 */
void
drop_node_interfaces(Oid nodeid)
{
	RangeVar   *rv;
	Relation	rel;
	SysScanDesc scan;
	HeapTuple	tuple;
	ScanKeyData key[1];

	rv = makeRangeVar(EXTENSION_NAME, CATALOG_NODE_INTERFACE, -1);
	rel = table_openrv(rv, RowExclusiveLock);

	/* Search for node record. */
	ScanKeyInit(&key[0],
				Anum_if_nodeid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(nodeid));

	scan = systable_beginscan(rel, 0, true, NULL, 1, key);

	/* Remove tuples. */
	while (HeapTupleIsValid(tuple = systable_getnext(scan)))
		simple_heap_delete(rel, &tuple->t_self);

	/* Cleanup; NoLock keeps the lock until end of transaction. */
	systable_endscan(scan);
	table_close(rel, NoLock);

	CommandCounterIncrement();
}

/*
 * Get the node interface from the catalog.
 *
 * Errors out if the interface does not exist; result is palloc'd.
 */
PGlogicalInterface *
get_node_interface(Oid ifid)
{
	RangeVar   *rv;
	Relation	rel;
	SysScanDesc scan;
	HeapTuple	tuple;
	ScanKeyData key[1];
	NodeInterfaceTuple *iftup;
	PGlogicalInterface *nodeif;

	rv = makeRangeVar(EXTENSION_NAME, CATALOG_NODE_INTERFACE, -1);
	rel = table_openrv(rv, RowExclusiveLock);

	/* Search for node record. */
	ScanKeyInit(&key[0],
				Anum_if_id,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(ifid));

	scan = systable_beginscan(rel, 0, true, NULL, 1, key);
	tuple = systable_getnext(scan);

	if (!HeapTupleIsValid(tuple))
		elog(ERROR, "node interface %u not found", ifid);

	iftup = (NodeInterfaceTuple *) GETSTRUCT(tuple);
	nodeif = (PGlogicalInterface *) palloc(sizeof(PGlogicalInterface));
	nodeif->id = iftup->if_id;
	nodeif->name = pstrdup(NameStr(iftup->if_name));
	nodeif->nodeid = iftup->if_nodeid;
	nodeif->dsn = pstrdup(text_to_cstring(&iftup->if_dsn));

	/* Cleanup. */
	systable_endscan(scan);
	table_close(rel, RowExclusiveLock);

	return nodeif;
}

/*
 * Get the node interface by name.
*/
/*
 * Look up a node interface by (nodeid, name).
 *
 * Returns NULL when missing_ok is true and no match exists; otherwise a
 * missing interface is an ERROR.  Result is palloc'd.
 */
PGlogicalInterface *
get_node_interface_by_name(Oid nodeid, const char *name, bool missing_ok)
{
	RangeVar   *rv;
	Relation	rel;
	SysScanDesc scan;
	HeapTuple	tuple;
	ScanKeyData key[2];
	NodeInterfaceTuple *iftup;
	PGlogicalInterface *nodeif;

	rv = makeRangeVar(EXTENSION_NAME, CATALOG_NODE_INTERFACE, -1);
	rel = table_openrv(rv, RowExclusiveLock);

	/* Search for interface record. */
	ScanKeyInit(&key[0],
				Anum_if_nodeid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(nodeid));
	ScanKeyInit(&key[1],
				Anum_if_name,
				BTEqualStrategyNumber, F_NAMEEQ,
				CStringGetDatum(name));

	scan = systable_beginscan(rel, 0, true, NULL, 2, key);
	tuple = systable_getnext(scan);

	if (!HeapTupleIsValid(tuple))
	{
		if (missing_ok)
		{
			systable_endscan(scan);
			table_close(rel, RowExclusiveLock);
			return NULL;
		}
		else
			/* Fixed message typo: "nod" -> "node". */
			elog(ERROR, "node interface \"%s\" not found for node %u",
				 name, nodeid);
	}

	iftup = (NodeInterfaceTuple *) GETSTRUCT(tuple);
	nodeif = (PGlogicalInterface *) palloc(sizeof(PGlogicalInterface));
	nodeif->id = iftup->if_id;
	nodeif->name = pstrdup(NameStr(iftup->if_name));
	nodeif->nodeid = iftup->if_nodeid;
	nodeif->dsn = pstrdup(text_to_cstring(&iftup->if_dsn));

	/* Cleanup. */
	systable_endscan(scan);
	table_close(rel, RowExclusiveLock);

	return nodeif;
}

/*
 * Add new subscription to catalog.
 */
void
create_subscription(PGLogicalSubscription *sub)
{
	RangeVar   *rv;
	Relation	rel;
	TupleDesc	tupDesc;
	HeapTuple	tup;
	Datum		values[Natts_subscription];
	bool		nulls[Natts_subscription];
	NameData	sub_name;
	NameData	sub_slot_name;

	/* Validate the new subscription name. */
	validate_subscription_name(sub->name);

	if (get_subscription_by_name(sub->name, true) != NULL)
		elog(ERROR, "subscription %s already exists", sub->name);

	/* Generate new id unless one was already specified.
*/
	/* Subscription id is a stable hash of the subscription name. */
	if (sub->id == InvalidOid)
		sub->id = DatumGetObjectId(hash_any((const unsigned char *) sub->name,
											strlen(sub->name)));

	rv = makeRangeVar(EXTENSION_NAME, CATALOG_SUBSCRIPTION, -1);
	rel = table_openrv(rv, RowExclusiveLock);
	tupDesc = RelationGetDescr(rel);

	/* Form a tuple. */
	memset(nulls, false, sizeof(nulls));

	values[Anum_sub_id - 1] = ObjectIdGetDatum(sub->id);
	namestrcpy(&sub_name, sub->name);
	values[Anum_sub_name - 1] = NameGetDatum(&sub_name);
	values[Anum_sub_origin - 1] = ObjectIdGetDatum(sub->origin_if->nodeid);
	values[Anum_sub_target - 1] = ObjectIdGetDatum(sub->target_if->nodeid);
	values[Anum_sub_origin_if - 1] = ObjectIdGetDatum(sub->origin_if->id);
	values[Anum_sub_target_if - 1] = ObjectIdGetDatum(sub->target_if->id);
	values[Anum_sub_enabled - 1] = BoolGetDatum(sub->enabled);
	namestrcpy(&sub_slot_name, sub->slot_name);
	values[Anum_sub_slot_name - 1] = NameGetDatum(&sub_slot_name);

	/* Empty list is stored as SQL NULL. */
	if (list_length(sub->replication_sets) > 0)
		values[Anum_sub_replication_sets - 1] =
			PointerGetDatum(strlist_to_textarray(sub->replication_sets));
	else
		nulls[Anum_sub_replication_sets - 1] = true;

	if (list_length(sub->forward_origins) > 0)
		values[Anum_sub_forward_origins - 1] =
			PointerGetDatum(strlist_to_textarray(sub->forward_origins));
	else
		nulls[Anum_sub_forward_origins - 1] = true;

	/* apply_delay is optional; store SQL NULL when unset. */
	if (sub->apply_delay)
		values[Anum_sub_apply_delay - 1] = IntervalPGetDatum(sub->apply_delay);
	else
		nulls[Anum_sub_apply_delay - 1] = true;

	values[Anum_sub_force_text_transfer - 1] = BoolGetDatum(sub->force_text_transfer);

	tup = heap_form_tuple(tupDesc, values, nulls);

	/* Insert the tuple to the catalog. */
	CatalogTupleInsert(rel, tup);

	/* Cleanup. */
	heap_freetuple(tup);
	table_close(rel, RowExclusiveLock);

	CommandCounterIncrement();

	pglogical_subscription_changed(sub->id, true);
}

/*
 * Change the subscription tuple.
*/
/*
 * Update an existing subscription record in place.
 *
 * The subscription id and name are never changed; an attempted rename is
 * rejected with an ERROR.
 */
void
alter_subscription(PGLogicalSubscription *sub)
{
	RangeVar   *rv;
	Relation	rel;
	TupleDesc	tupDesc;
	SysScanDesc scan;
	SubscriptionTuple *oldsub;
	HeapTuple	oldtup,
				newtup;
	ScanKeyData key[1];
	Datum		values[Natts_subscription];
	bool		nulls[Natts_subscription];
	bool		replaces[Natts_subscription];
	NameData	sub_slot_name;

	rv = makeRangeVar(EXTENSION_NAME, CATALOG_SUBSCRIPTION, -1);
	rel = table_openrv(rv, RowExclusiveLock);
	tupDesc = RelationGetDescr(rel);

	/* Search for node record. */
	ScanKeyInit(&key[0],
				Anum_sub_id,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(sub->id));

	scan = systable_beginscan(rel, 0, true, NULL, 1, key);
	oldtup = systable_getnext(scan);

	if (!HeapTupleIsValid(oldtup))
		elog(ERROR, "subscription %u not found", sub->id);

	oldsub = (SubscriptionTuple *) GETSTRUCT(oldtup);

	/*
	 * Renaming a subscription is unsupported; this must be an ERROR.
	 * (Previously this was only ereport(LOG), which silently ignored the
	 * requested rename and proceeded with the update.)
	 */
	if (strcmp(NameStr(oldsub->sub_name), sub->name) != 0)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("subscription name change is not supported")));

	/* Form a tuple; keep id and name from the existing record. */
	memset(nulls, false, sizeof(nulls));
	memset(replaces, true, sizeof(replaces));
	replaces[Anum_sub_id - 1] = false;
	replaces[Anum_sub_name - 1] = false;

	values[Anum_sub_origin - 1] = ObjectIdGetDatum(sub->origin_if->nodeid);
	values[Anum_sub_target - 1] = ObjectIdGetDatum(sub->target_if->nodeid);
	values[Anum_sub_origin_if - 1] = ObjectIdGetDatum(sub->origin_if->id);
	values[Anum_sub_target_if - 1] = ObjectIdGetDatum(sub->target_if->id);
	values[Anum_sub_enabled - 1] = BoolGetDatum(sub->enabled);
	namestrcpy(&sub_slot_name, sub->slot_name);
	values[Anum_sub_slot_name - 1] = NameGetDatum(&sub_slot_name);

	/* Empty list is stored as SQL NULL. */
	if (list_length(sub->replication_sets) > 0)
		values[Anum_sub_replication_sets - 1] =
			PointerGetDatum(strlist_to_textarray(sub->replication_sets));
	else
		nulls[Anum_sub_replication_sets - 1] = true;

	if (list_length(sub->forward_origins) > 0)
		values[Anum_sub_forward_origins - 1] =
			PointerGetDatum(strlist_to_textarray(sub->forward_origins));
	else
		nulls[Anum_sub_forward_origins - 1] = true;
values[Anum_sub_apply_delay - 1] = IntervalPGetDatum(sub->apply_delay); values[Anum_sub_force_text_transfer - 1] = BoolGetDatum(sub->force_text_transfer); newtup = heap_modify_tuple(oldtup, tupDesc, values, nulls, replaces); /* Update the tuple in catalog. */ CatalogTupleUpdate(rel, &oldtup->t_self, newtup); /* Cleanup. */ heap_freetuple(newtup); systable_endscan(scan); table_close(rel, NoLock); CommandCounterIncrement(); pglogical_subscription_changed(sub->id, true); } /* * Delete the tuple from subsription catalog. */ void drop_subscription(Oid subid) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[1]; rv = makeRangeVar(EXTENSION_NAME, CATALOG_SUBSCRIPTION, -1); rel = table_openrv(rv, RowExclusiveLock); /* Search for node record. */ ScanKeyInit(&key[0], Anum_sub_id, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(subid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); tuple = systable_getnext(scan); if (!HeapTupleIsValid(tuple)) elog(ERROR, "subscription %u not found", subid); /* Remove the tuple. */ simple_heap_delete(rel, &tuple->t_self); /* Cleanup. */ systable_endscan(scan); table_close(rel, NoLock); CommandCounterIncrement(); pglogical_subscription_changed(subid, true); } static PGLogicalSubscription* subscription_fromtuple(HeapTuple tuple, TupleDesc desc) { SubscriptionTuple *subtup = (SubscriptionTuple *) GETSTRUCT(tuple); Datum d; bool isnull; PGLogicalSubscription *sub = (PGLogicalSubscription *) palloc(sizeof(PGLogicalSubscription)); sub->id = subtup->sub_id; sub->name = pstrdup(NameStr(subtup->sub_name)); sub->enabled = subtup->sub_enabled; sub->slot_name = pstrdup(NameStr(subtup->sub_slot_name)); sub->origin = get_node(subtup->sub_origin); sub->target = get_node(subtup->sub_target); sub->origin_if = get_node_interface(subtup->sub_origin_if); sub->target_if = get_node_interface(subtup->sub_target_if); /* Get replication sets. 
*/
	/* SQL NULL means an empty replication-set list. */
	d = heap_getattr(tuple, Anum_sub_replication_sets, desc, &isnull);
	if (isnull)
		sub->replication_sets = NIL;
	else
	{
		List	   *repset_names;
		repset_names = textarray_to_list(DatumGetArrayTypeP(d));
		sub->replication_sets = repset_names;
	}

	/* Get origin forwarding; SQL NULL means an empty list. */
	d = heap_getattr(tuple, Anum_sub_forward_origins, desc, &isnull);
	if (isnull)
		sub->forward_origins = NIL;
	else
	{
		List	   *forward_origin_names;
		forward_origin_names = textarray_to_list(DatumGetArrayTypeP(d));
		sub->forward_origins = forward_origin_names;
	}

	/* Get apply_delay. */
	d = heap_getattr(tuple, Anum_sub_apply_delay, desc, &isnull);
	if (isnull)
		sub->apply_delay = NULL;
	else
		sub->apply_delay = DatumGetIntervalP(d);

	/* Get force_text_transfer. */
	d = heap_getattr(tuple, Anum_sub_force_text_transfer, desc, &isnull);
	if (isnull)
		sub->force_text_transfer = false;
	else
		sub->force_text_transfer = DatumGetBool(d);

	return sub;
}

/*
 * Load the info for specific subscriber.
 *
 * Errors out if the subscription is not found.
 */
PGLogicalSubscription *
get_subscription(Oid subid)
{
	PGLogicalSubscription  *sub;
	RangeVar   *rv;
	Relation	rel;
	SysScanDesc scan;
	HeapTuple	tuple;
	TupleDesc	desc;
	ScanKeyData key[1];

	rv = makeRangeVar(EXTENSION_NAME, CATALOG_SUBSCRIPTION, -1);
	rel = table_openrv(rv, RowExclusiveLock);

	/* Search for node record. */
	ScanKeyInit(&key[0],
				Anum_sub_id,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(subid));

	scan = systable_beginscan(rel, 0, true, NULL, 1, key);
	tuple = systable_getnext(scan);

	if (!HeapTupleIsValid(tuple))
		elog(ERROR, "subscription %u not found", subid);

	desc = RelationGetDescr(rel);
	sub = subscription_fromtuple(tuple, desc);

	systable_endscan(scan);
	table_close(rel, RowExclusiveLock);

	return sub;
}

/*
 * Load the info for specific subscriber.
*/
/*
 * Look a subscription up by name; returns NULL instead of erroring when
 * missing_ok is true.
 */
PGLogicalSubscription *
get_subscription_by_name(const char *name, bool missing_ok)
{
	PGLogicalSubscription  *sub;
	RangeVar   *rv;
	Relation	rel;
	SysScanDesc scan;
	HeapTuple	tuple;
	TupleDesc	desc;
	ScanKeyData key[1];

	rv = makeRangeVar(EXTENSION_NAME, CATALOG_SUBSCRIPTION, -1);
	rel = table_openrv(rv, RowExclusiveLock);

	/* Search for node record. */
	ScanKeyInit(&key[0],
				Anum_sub_name,
				BTEqualStrategyNumber, F_NAMEEQ,
				CStringGetDatum(name));

	scan = systable_beginscan(rel, 0, true, NULL, 1, key);
	tuple = systable_getnext(scan);

	if (!HeapTupleIsValid(tuple))
	{
		if (missing_ok)
		{
			systable_endscan(scan);
			table_close(rel, RowExclusiveLock);
			return NULL;
		}

		elog(ERROR, "subscriber %s not found", name);
	}

	desc = RelationGetDescr(rel);
	sub = subscription_fromtuple(tuple, desc);

	systable_endscan(scan);
	table_close(rel, RowExclusiveLock);

	return sub;
}

/*
 * Return all target node subscriptions.
 *
 * With origin=true, returns subscriptions originating at the node
 * instead.
 */
List *
get_node_subscriptions(Oid nodeid, bool origin)
{
	PGLogicalSubscription  *sub;
	RangeVar   *rv;
	Relation	rel;
	SysScanDesc scan;
	HeapTuple	tuple;
	TupleDesc	desc;
	ScanKeyData key[1];
	List	   *res = NIL;

	rv = makeRangeVar(EXTENSION_NAME, CATALOG_SUBSCRIPTION, -1);
	rel = table_openrv(rv, RowExclusiveLock);
	desc = RelationGetDescr(rel);

	ScanKeyInit(&key[0],
				origin ?
Anum_sub_origin : Anum_sub_target, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(nodeid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); while (HeapTupleIsValid(tuple = systable_getnext(scan))) { sub = subscription_fromtuple(tuple, desc); res = lappend(res, sub); } systable_endscan(scan); table_close(rel, RowExclusiveLock); return res; } pglogical-REL2_4_1/pglogical_node.h000066400000000000000000000043021415142317000173000ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_node.h * pglogical node and connection catalog manipulation functions * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_node.h * *------------------------------------------------------------------------- */ #ifndef PGLOGICAL_NODE_H #define PGLOGICAL_NODE_H #include "datatype/timestamp.h" typedef struct PGLogicalNode { Oid id; char *name; } PGLogicalNode; typedef struct PGlogicalInterface { Oid id; const char *name; Oid nodeid; const char *dsn; } PGlogicalInterface; typedef struct PGLogicalLocalNode { PGLogicalNode *node; PGlogicalInterface *node_if; } PGLogicalLocalNode; typedef struct PGLogicalSubscription { Oid id; char *name; PGLogicalNode *origin; PGLogicalNode *target; PGlogicalInterface *origin_if; PGlogicalInterface *target_if; bool enabled; Interval *apply_delay; char *slot_name; List *replication_sets; List *forward_origins; bool force_text_transfer; } PGLogicalSubscription; extern void create_node(PGLogicalNode *node); extern void drop_node(Oid nodeid); extern PGLogicalNode *get_node(Oid nodeid); extern PGLogicalNode *get_node_by_name(const char *name, bool missing_ok); extern void create_node_interface(PGlogicalInterface *node); extern void drop_node_interface(Oid ifid); extern void drop_node_interfaces(Oid nodeid); extern PGlogicalInterface *get_node_interface(Oid ifid); extern PGlogicalInterface *get_node_interface_by_name(Oid nodeid, const char *name, bool 
missing_ok); extern void create_local_node(Oid nodeid, Oid ifid); extern void drop_local_node(void); extern PGLogicalLocalNode *get_local_node(bool for_update, bool missing_ok); extern void create_subscription(PGLogicalSubscription *sub); extern void alter_subscription(PGLogicalSubscription *sub); extern void drop_subscription(Oid subid); extern PGLogicalSubscription *get_subscription(Oid subid); extern PGLogicalSubscription *get_subscription_by_name(const char *name, bool missing_ok); extern List *get_node_subscriptions(Oid nodeid, bool origin); #endif /* PGLOGICAL_NODE_H */ pglogical-REL2_4_1/pglogical_origin--1.0.0.sql000066400000000000000000000001071415142317000207200ustar00rootroot00000000000000\echo Use "CREATE EXTENSION pglogical_origin" to load this file. \quit pglogical-REL2_4_1/pglogical_origin.control000066400000000000000000000003321415142317000210720ustar00rootroot00000000000000# pglogical_origin extension comment = 'Dummy extension for compatibility when upgrading from Postgres 9.4' default_version = '1.0.0' module_pathname = '$libdir/pglogical' relocatable = false schema = pglogical_origin pglogical-REL2_4_1/pglogical_output.c000066400000000000000000000017321415142317000177120ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_output.c * Logical Replication output plugin which just loads and forwards * the call to the pglogical. * * This exists for backwards compatibility. 
* * Copyright (c) 2012-2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_output.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "replication/logical.h" PG_MODULE_MAGIC; extern void _PG_output_plugin_init(OutputPluginCallbacks *cb); void _PG_output_plugin_init(OutputPluginCallbacks *cb) { LogicalOutputPluginInit plugin_init; AssertVariableIsOfType(&_PG_output_plugin_init, LogicalOutputPluginInit); plugin_init = (LogicalOutputPluginInit) load_external_function("pglogical", "_PG_output_plugin_init", false, NULL); if (plugin_init == NULL) elog(ERROR, "could not load pglogical output plugin"); plugin_init(cb); } pglogical-REL2_4_1/pglogical_output_config.c000066400000000000000000000403301415142317000212340ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_output_config.c * Logical Replication output plugin configuration handling * * Copyright (c) 2012-2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_output_config.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "catalog/catversion.h" #include "mb/pg_wchar.h" #include "nodes/makefuncs.h" #include "replication/reorderbuffer.h" #include "utils/builtins.h" #include "utils/int8.h" #include "miscadmin.h" #include "pglogical.h" #include "pglogical_output_config.h" #include "pglogical_output_proto.h" #include "pglogical_repset.h" typedef enum PGLogicalOutputParamType { OUTPUT_PARAM_TYPE_BOOL, OUTPUT_PARAM_TYPE_UINT32, OUTPUT_PARAM_TYPE_INT32, OUTPUT_PARAM_TYPE_STRING, OUTPUT_PARAM_TYPE_QUALIFIED_NAME } PGLogicalOutputParamType; /* param parsing */ static Datum get_param_value(DefElem *elem, bool null_ok, PGLogicalOutputParamType type); static Datum get_param(List *options, const char *name, bool missing_ok, bool null_ok, PGLogicalOutputParamType type, bool *found); static bool 
parse_param_bool(DefElem *elem); static uint32 parse_param_uint32(DefElem *elem); static int32 parse_param_int32(DefElem *elem); static void process_parameters_v1(List *options, PGLogicalOutputData *data); enum { PARAM_UNRECOGNISED, PARAM_MAX_PROTOCOL_VERSION, PARAM_MIN_PROTOCOL_VERSION, PARAM_PROTOCOL_FORMAT, PARAM_EXPECTED_ENCODING, PARAM_BINARY_BIGENDIAN, PARAM_BINARY_SIZEOF_DATUM, PARAM_BINARY_SIZEOF_INT, PARAM_BINARY_SIZEOF_LONG, PARAM_BINARY_FLOAT4BYVAL, PARAM_BINARY_FLOAT8BYVAL, PARAM_BINARY_INTEGER_DATETIMES, PARAM_BINARY_WANT_INTERNAL_BASETYPES, PARAM_BINARY_WANT_BINARY_BASETYPES, PARAM_BINARY_BASETYPES_MAJOR_VERSION, PARAM_PGLOGICAL_FORWARD_ORIGINS, PARAM_PGLOGICAL_REPLICATION_SET_NAMES, PARAM_PGLOGICAL_REPLICATE_ONLY_TABLE, PARAM_HOOKS_SETUP_FUNCTION, PARAM_PG_VERSION, PARAM_NO_TXINFO } OutputPluginParamKey; typedef struct { const char * const paramname; int paramkey; } OutputPluginParam; /* Oh, if only C had switch on strings */ static OutputPluginParam param_lookup[] = { {"max_proto_version", PARAM_MAX_PROTOCOL_VERSION}, {"min_proto_version", PARAM_MIN_PROTOCOL_VERSION}, {"proto_format", PARAM_PROTOCOL_FORMAT}, {"expected_encoding", PARAM_EXPECTED_ENCODING}, {"binary.bigendian", PARAM_BINARY_BIGENDIAN}, {"binary.sizeof_datum", PARAM_BINARY_SIZEOF_DATUM}, {"binary.sizeof_int", PARAM_BINARY_SIZEOF_INT}, {"binary.sizeof_long", PARAM_BINARY_SIZEOF_LONG}, {"binary.float4_byval", PARAM_BINARY_FLOAT4BYVAL}, {"binary.float8_byval", PARAM_BINARY_FLOAT8BYVAL}, {"binary.integer_datetimes", PARAM_BINARY_INTEGER_DATETIMES}, {"binary.want_internal_basetypes", PARAM_BINARY_WANT_INTERNAL_BASETYPES}, {"binary.want_binary_basetypes", PARAM_BINARY_WANT_BINARY_BASETYPES}, {"binary.basetypes_major_version", PARAM_BINARY_BASETYPES_MAJOR_VERSION}, {"pglogical.forward_origins", PARAM_PGLOGICAL_FORWARD_ORIGINS}, {"pglogical.replication_set_names", PARAM_PGLOGICAL_REPLICATION_SET_NAMES}, {"pglogical.replicate_only_table", PARAM_PGLOGICAL_REPLICATE_ONLY_TABLE}, 
{"hooks.setup_function", PARAM_HOOKS_SETUP_FUNCTION}, {"pg_version", PARAM_PG_VERSION}, {"no_txinfo", PARAM_NO_TXINFO}, {NULL, PARAM_UNRECOGNISED} }; /* * Look up a param name to find the enum value for the * param, or PARAM_UNRECOGNISED if not found. */ static int get_param_key(const char * const param_name) { OutputPluginParam *param = ¶m_lookup[0]; do { if (strcmp(param->paramname, param_name) == 0) return param->paramkey; param++; } while (param->paramname != NULL); return PARAM_UNRECOGNISED; } void process_parameters_v1(List *options, PGLogicalOutputData *data) { Datum val; ListCell *lc; /* * max_proto_version and min_proto_version are specified * as required, and must be parsed before anything else. * * TODO: We should still parse them as optional and * delay the ERROR until after the startup reply. */ val = get_param(options, "max_proto_version", false, false, OUTPUT_PARAM_TYPE_UINT32, NULL); data->client_max_proto_version = DatumGetUInt32(val); val = get_param(options, "min_proto_version", false, false, OUTPUT_PARAM_TYPE_UINT32, NULL); data->client_min_proto_version = DatumGetUInt32(val); /* Examine all the other params in the v1 message. 
*/
	foreach(lc, options)
	{
		DefElem    *elem = lfirst(lc);

		Assert(elem->arg == NULL || IsA(elem->arg, String));

		/* Check each param, whether or not we recognise it */
		switch(get_param_key(elem->defname))
		{
			case PARAM_BINARY_BIGENDIAN:
				val = get_param_value(elem, false, OUTPUT_PARAM_TYPE_BOOL);
				data->client_binary_bigendian_set = true;
				data->client_binary_bigendian = DatumGetBool(val);
				break;

			case PARAM_BINARY_SIZEOF_DATUM:
				val = get_param_value(elem, false, OUTPUT_PARAM_TYPE_UINT32);
				data->client_binary_sizeofdatum = DatumGetUInt32(val);
				break;

			case PARAM_BINARY_SIZEOF_INT:
				val = get_param_value(elem, false, OUTPUT_PARAM_TYPE_UINT32);
				data->client_binary_sizeofint = DatumGetUInt32(val);
				break;

			case PARAM_BINARY_SIZEOF_LONG:
				val = get_param_value(elem, false, OUTPUT_PARAM_TYPE_UINT32);
				data->client_binary_sizeoflong = DatumGetUInt32(val);
				break;

			case PARAM_BINARY_FLOAT4BYVAL:
				val = get_param_value(elem, false, OUTPUT_PARAM_TYPE_BOOL);
				data->client_binary_float4byval_set = true;
				data->client_binary_float4byval = DatumGetBool(val);
				break;

			case PARAM_BINARY_FLOAT8BYVAL:
				/*
				 * Fixed copy-paste bug: this case previously stored into
				 * the float4byval fields, losing the client's float8_byval
				 * setting and clobbering float4byval.
				 */
				val = get_param_value(elem, false, OUTPUT_PARAM_TYPE_BOOL);
				data->client_binary_float8byval_set = true;
				data->client_binary_float8byval = DatumGetBool(val);
				break;

			case PARAM_BINARY_INTEGER_DATETIMES:
				val = get_param_value(elem, false, OUTPUT_PARAM_TYPE_BOOL);
				data->client_binary_intdatetimes_set = true;
				data->client_binary_intdatetimes = DatumGetBool(val);
				break;

			case PARAM_PROTOCOL_FORMAT:
				val = get_param_value(elem, false, OUTPUT_PARAM_TYPE_STRING);
				data->client_protocol_format = DatumGetCString(val);
				break;

			case PARAM_EXPECTED_ENCODING:
				val = get_param_value(elem, false, OUTPUT_PARAM_TYPE_STRING);
				data->client_expected_encoding = DatumGetCString(val);
				break;

			case PARAM_PG_VERSION:
				val = get_param_value(elem, false, OUTPUT_PARAM_TYPE_UINT32);
				data->client_pg_version = DatumGetUInt32(val);
				break;

			case PARAM_BINARY_WANT_INTERNAL_BASETYPES:
				/* check if we want to use internal data representation */
				val = get_param_value(elem, false, OUTPUT_PARAM_TYPE_BOOL);
				data->client_want_internal_basetypes_set = true;
				data->client_want_internal_basetypes = DatumGetBool(val);
				break;

			case PARAM_BINARY_WANT_BINARY_BASETYPES:
				/* check if we want to use binary data representation */
				val = get_param_value(elem, false, OUTPUT_PARAM_TYPE_BOOL);
				data->client_want_binary_basetypes_set = true;
				data->client_want_binary_basetypes = DatumGetBool(val);
				break;

			case PARAM_BINARY_BASETYPES_MAJOR_VERSION:
				val = get_param_value(elem, false, OUTPUT_PARAM_TYPE_UINT32);
				data->client_binary_basetypes_major_version = DatumGetUInt32(val);
				break;

			case PARAM_PGLOGICAL_FORWARD_ORIGINS:
				{
					List	   *forward_origin_names;
					ListCell   *lc;

					val = get_param_value(elem, false, OUTPUT_PARAM_TYPE_STRING);
					if (!SplitIdentifierString(DatumGetCString(val), ',',
											   &forward_origin_names))
						elog(ERROR, "Could not parse forward origin name list %s",
							 DatumGetCString(val));

					/* Only the "all origins" wildcard is supported so far. */
					foreach (lc, forward_origin_names)
					{
						char	   *origin_name = (char *) lfirst(lc);

						if (strcmp(origin_name, REPLICATION_ORIGIN_ALL) != 0)
							elog(ERROR, "Only \"%s\" is allowed in forward origin name list at the moment, found \"%s\"",
								 REPLICATION_ORIGIN_ALL, origin_name);
					}

					data->forward_origins = forward_origin_names;
					break;
				}

			case PARAM_PGLOGICAL_REPLICATION_SET_NAMES:
				{
					List	   *replication_set_names;

					val = get_param_value(elem, false, OUTPUT_PARAM_TYPE_STRING);
					if (!SplitIdentifierString(strVal(elem->arg), ',',
											   &replication_set_names))
						elog(ERROR, "Could not parse replication set name list %s",
							 strVal(elem->arg));

					data->replication_sets =
						get_replication_sets(data->local_node_id,
											 replication_set_names, false);
					break;
				}

			case PARAM_PGLOGICAL_REPLICATE_ONLY_TABLE:
				{
					List	   *replicate_only_table;

					val = get_param_value(elem, false, OUTPUT_PARAM_TYPE_STRING);
					if (!SplitIdentifierString(strVal(elem->arg), '.',
											   &replicate_only_table))
						elog(ERROR, "Could not parse replicate_only_table %s",
							 strVal(elem->arg));

					data->replicate_only_table =
makeRangeVar(pstrdup(linitial(replicate_only_table)), pstrdup(lsecond(replicate_only_table)), -1); break; } case PARAM_NO_TXINFO: val = get_param_value(elem, false, OUTPUT_PARAM_TYPE_BOOL); data->client_no_txinfo = DatumGetBool(val); break; /* Backwards compat. */ case PARAM_HOOKS_SETUP_FUNCTION: break; case PARAM_UNRECOGNISED: ereport(DEBUG1, (errmsg("Unrecognised pglogical parameter %s ignored", elem->defname))); break; } } } /* * Read parameters sent by client at startup and store recognised * ones in the parameters PGLogicalOutputData. * * The PGLogicalOutputData must have all client-supplied parameter fields * zeroed, such as by memset or palloc0, since values not supplied * by the client are not set. */ int process_parameters(List *options, PGLogicalOutputData *data) { Datum val; int params_format; val = get_param(options, "startup_params_format", false, false, OUTPUT_PARAM_TYPE_UINT32, NULL); params_format = DatumGetUInt32(val); if (params_format == PGLOGICAL_STARTUP_PARAM_FORMAT_FLAT) process_parameters_v1(options, data); else ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("startup_params_format %d not supported, only version %d supported", params_format, PGLOGICAL_STARTUP_PARAM_FORMAT_FLAT))); return params_format; } static Datum get_param_value(DefElem *elem, bool null_ok, PGLogicalOutputParamType type) { /* Check for NULL value */ if (elem->arg == NULL || strVal(elem->arg) == NULL) { if (null_ok) return (Datum) 0; else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("parameter \"%s\" cannot be NULL", elem->defname))); } switch (type) { case OUTPUT_PARAM_TYPE_UINT32: return UInt32GetDatum(parse_param_uint32(elem)); case OUTPUT_PARAM_TYPE_INT32: return Int32GetDatum(parse_param_int32(elem)); case OUTPUT_PARAM_TYPE_BOOL: return BoolGetDatum(parse_param_bool(elem)); case OUTPUT_PARAM_TYPE_STRING: return PointerGetDatum(pstrdup(strVal(elem->arg))); case OUTPUT_PARAM_TYPE_QUALIFIED_NAME: return 
PointerGetDatum(textToQualifiedNameList(cstring_to_text(pstrdup(strVal(elem->arg))))); default: elog(ERROR, "unknown parameter type %d", type); } } /* * Param parsing * * This is not exactly fast but since it's only called on replication start * we'll leave it for now. */ static Datum get_param(List *options, const char *name, bool missing_ok, bool null_ok, PGLogicalOutputParamType type, bool *found) { ListCell *option; if (found != NULL) *found = false; else Assert(!missing_ok); foreach(option, options) { DefElem *elem = lfirst(option); Assert(elem->arg == NULL || IsA(elem->arg, String)); /* Search until matching parameter found */ if (pg_strcasecmp(name, elem->defname)) continue; if (found != NULL) *found = true; return get_param_value(elem, null_ok, type); } if (!missing_ok) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("missing required parameter \"%s\"", name))); return (Datum) 0; } static bool parse_param_bool(DefElem *elem) { bool res; if (!parse_bool(strVal(elem->arg), &res)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("could not parse boolean value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname))); return res; } static uint32 parse_param_uint32(DefElem *elem) { int64 res; if (!scanint8(strVal(elem->arg), true, &res)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("could not parse integer value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname))); if (res > PG_UINT32_MAX || res < 0) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("value \"%s\" out of range for parameter \"%s\"", strVal(elem->arg), elem->defname))); return (uint32) res; } static int32 parse_param_int32(DefElem *elem) { int64 res; if (!scanint8(strVal(elem->arg), true, &res)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("could not parse integer value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname))); if (res > PG_INT32_MAX || res < PG_INT32_MIN) 
/* Append a string-valued key/value pair to the startup message list. */
static List*
add_startup_msg_s(List *l, char *key, char *val)
{
	return lappend(l, makeDefElem(key, (Node*)makeString(val)));
}

/* Append an integer-valued key/value pair, rendered as decimal text. */
static List*
add_startup_msg_i(List *l, char *key, int val)
{
	return lappend(l, makeDefElem(key, (Node*)makeString(psprintf("%d", val))));
}

/* Append a boolean key/value pair, encoded as "t" or "f". */
static List*
add_startup_msg_b(List *l, char *key, bool val)
{
	return lappend(l, makeDefElem(key, (Node*)makeString(val ? "t" : "f")));
}
/*
 * This builds the protocol startup message, which is always the first
 * message on the wire after the client sends START_REPLICATION.
 *
 * It confirms to the client that we could apply requested options, and
 * tells the client our capabilities.
 *
 * Returns a List of DefElem key/value pairs, palloc'd in the current
 * memory context.  The caller is responsible for serializing it onto the
 * wire and freeing it (see send_startup_message).
 *
 * This is a bit less efficient than direct pq_sendblah calls, but
 * separates config handling from the protocol implementation, and
 * it's not like startup msg performance matters much.
 */
List *
prepare_startup_message(PGLogicalOutputData *data)
{
	List	   *l = NIL;

	l = add_startup_msg_s(l, "max_proto_version", "1");
	l = add_startup_msg_s(l, "min_proto_version", "1");

	/* We don't yet support sending column type information */
	l = add_startup_msg_b(l, "coltypes", false);

	/* Info about our Pg host */
	l = add_startup_msg_i(l, "pg_version_num", PG_VERSION_NUM);
	l = add_startup_msg_s(l, "pg_version", PG_VERSION);
	l = add_startup_msg_i(l, "pg_catversion", CATALOG_VERSION_NO);

	l = add_startup_msg_s(l, "database_encoding", (char*)GetDatabaseEncodingName());

	/* Encoding used for datum text in this stream; see expected_encoding */
	l = add_startup_msg_s(l, "encoding", (char*)pg_encoding_to_char(data->field_datum_encoding));

	l = add_startup_msg_b(l, "forward_changeset_origins",
			data->forward_changeset_origins);

	l = add_startup_msg_i(l, "walsender_pid", MyProcPid);

	/* and ourselves */
	l = add_startup_msg_s(l, "pglogical_version", PGLOGICAL_VERSION);
	l = add_startup_msg_i(l, "pglogical_version_num", PGLOGICAL_VERSION_NUM);

	/* binary options enabled */
	l = add_startup_msg_b(l, "binary.internal_basetypes",
			data->allow_internal_basetypes);
	l = add_startup_msg_b(l, "binary.binary_basetypes",
			data->allow_binary_basetypes);

	/* Binary format characteristics of server */
	l = add_startup_msg_i(l, "binary.basetypes_major_version", PG_VERSION_NUM/100);
	l = add_startup_msg_i(l, "binary.sizeof_int", sizeof(int));
	l = add_startup_msg_i(l, "binary.sizeof_long", sizeof(long));
	l = add_startup_msg_i(l, "binary.sizeof_datum", sizeof(Datum));
	l = add_startup_msg_i(l, "binary.maxalign", MAXIMUM_ALIGNOF);
	l = add_startup_msg_b(l, "binary.bigendian", server_bigendian());
	l = add_startup_msg_b(l, "binary.float4_byval", server_float4_byval());
	l = add_startup_msg_b(l, "binary.float8_byval", server_float8_byval());
	l = add_startup_msg_b(l, "binary.integer_datetimes", server_integer_datetimes());
	/* We don't know how to send in anything except our host's format */
	l = add_startup_msg_i(l, "binary.binary_pg_version",
			PG_VERSION_NUM/100);

	l = add_startup_msg_b(l, "no_txinfo", data->client_no_txinfo);

	return l;
}
#ifdef HAVE_REPLICATION_ORIGINS #include "replication/origin.h" #endif extern void _PG_output_plugin_init(OutputPluginCallbacks *cb); static void pg_decode_startup(LogicalDecodingContext * ctx, OutputPluginOptions *opt, bool is_init); static void pg_decode_shutdown(LogicalDecodingContext * ctx); static void pg_decode_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn); static void pg_decode_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr commit_lsn); static void pg_decode_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, Relation rel, ReorderBufferChange *change); #ifdef HAVE_REPLICATION_ORIGINS static bool pg_decode_origin_filter(LogicalDecodingContext *ctx, RepOriginId origin_id); #endif static void send_startup_message(LogicalDecodingContext *ctx, PGLogicalOutputData *data, bool last_message); static bool startup_message_sent = false; typedef struct PGLRelMetaCacheEntry { Oid relid; /* Does the client have this relation cached? */ bool is_cached; /* Entry is valid and not due to be purged */ bool is_valid; } PGLRelMetaCacheEntry; #define RELMETACACHE_INITIAL_SIZE 128 static HTAB *RelMetaCache = NULL; static MemoryContext RelMetaCacheContext = NULL; static int InvalidRelMetaCacheCnt = 0; static void relmetacache_init(MemoryContext decoding_context); static PGLRelMetaCacheEntry *relmetacache_get_relation(PGLogicalOutputData *data, Relation rel); static void relmetacache_flush(void); static void relmetacache_prune(void); static void pglReorderBufferCleanSerializedTXNs(const char *slotname); /* specify output plugin callbacks */ void _PG_output_plugin_init(OutputPluginCallbacks *cb) { AssertVariableIsOfType(&_PG_output_plugin_init, LogicalOutputPluginInit); cb->startup_cb = pg_decode_startup; cb->begin_cb = pg_decode_begin_txn; cb->change_cb = pg_decode_change; cb->commit_cb = pg_decode_commit_txn; #ifdef HAVE_REPLICATION_ORIGINS cb->filter_by_origin_cb = pg_decode_origin_filter; #endif cb->shutdown_cb = 
/*
 * Decide whether we may send datums in internal (binary) representation to
 * this client.
 *
 * Binary mode is only safe when the client's declared platform
 * characteristics (endianness, type sizes, by-value float passing, integer
 * datetimes) all match ours.  Fields the client did not set (zero / *_set
 * false) are treated as "don't care".
 *
 * Returns true if binary internal representation may be used.
 *
 * Fix: the major-version check previously rejected silently; it now logs
 * a DEBUG1 reason like every other rejection path here.
 */
static bool
check_binary_compatibility(PGLogicalOutputData *data)
{
	if (data->client_binary_basetypes_major_version != PG_VERSION_NUM / 100)
	{
		elog(DEBUG1, "Binary mode rejected: Server and client major version mismatch");
		return false;
	}

	if (data->client_binary_bigendian_set
		&& data->client_binary_bigendian != server_bigendian())
	{
		elog(DEBUG1, "Binary mode rejected: Server and client endian mismatch");
		return false;
	}

	if (data->client_binary_sizeofdatum != 0
		&& data->client_binary_sizeofdatum != sizeof(Datum))
	{
		elog(DEBUG1, "Binary mode rejected: Server and client sizeof(Datum) mismatch");
		return false;
	}

	if (data->client_binary_sizeofint != 0
		&& data->client_binary_sizeofint != sizeof(int))
	{
		elog(DEBUG1, "Binary mode rejected: Server and client sizeof(int) mismatch");
		return false;
	}

	if (data->client_binary_sizeoflong != 0
		&& data->client_binary_sizeoflong != sizeof(long))
	{
		elog(DEBUG1, "Binary mode rejected: Server and client sizeof(long) mismatch");
		return false;
	}

	if (data->client_binary_float4byval_set
		&& data->client_binary_float4byval != server_float4_byval())
	{
		elog(DEBUG1, "Binary mode rejected: Server and client float4byval mismatch");
		return false;
	}

	if (data->client_binary_float8byval_set
		&& data->client_binary_float8byval != server_float8_byval())
	{
		elog(DEBUG1, "Binary mode rejected: Server and client float8byval mismatch");
		return false;
	}

	if (data->client_binary_intdatetimes_set
		&& data->client_binary_intdatetimes != server_integer_datetimes())
	{
		elog(DEBUG1, "Binary mode rejected: Server and client integer datetimes mismatch");
		return false;
	}

	return true;
}
ctx->output_plugin_private = data; /* * This is replication start and not slot initialization. * * Parse and validate options passed by the client. */ if (!is_init) { int params_format; bool started_tx = false; PGLogicalLocalNode *node; MemoryContext oldctx; /* * There's a potential corruption bug in PostgreSQL 10.1, 9.6.6, 9.5.10 * and 9.4.15 that can cause reorder buffers to accumulate duplicated * transactions. See * https://www.postgresql.org/message-id/CAMsr+YHdX=XECbZshDZ2CZNWGTyw-taYBnzqVfx4JzM4ExP5xg@mail.gmail.com * * We can defend against this by doing our own cleanup of any serialized * txns in the reorder buffer on startup. */ pglReorderBufferCleanSerializedTXNs(NameStr(MyReplicationSlot->data.name)); if (!IsTransactionState()) { StartTransactionCommand(); started_tx = true; } node = get_local_node(false, false); data->local_node_id = node->node->id; /* * Ideally we'd send the startup message immediately. That way * it'd arrive before any error we emit if we see incompatible * options sent by the client here. That way the client could * possibly adjust its options and reconnect. It'd also make * sure the client gets the startup message in a timely way if * the server is idle, since otherwise it could be a while * before the next callback. * * The decoding plugin API doesn't let us write to the stream * from here, though, so we have to delay the startup message * until the first change processed on the stream, in a begin * callback. * * If we ERROR there, the startup message is buffered but not * sent since the callback didn't finish. So we'd have to send * the startup message, finish the callback and check in the * next callback if we need to ERROR. * * That's a bit much hoop jumping, so for now ERRORs are * immediate. A way to emit a message from the startup callback * is really needed to change that. 
*/ startup_message_sent = false; /* Now parse the rest of the params and ERROR if we see any we don't recognise */ oldctx = MemoryContextSwitchTo(ctx->context); params_format = process_parameters(ctx->output_plugin_options, data); MemoryContextSwitchTo(oldctx); if (params_format != 1) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("client sent startup parameters in format %d but we only support format 1", params_format))); if (data->client_min_proto_version > PGLOGICAL_PROTO_VERSION_NUM) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("client sent min_proto_version=%d but we only support protocol %d or lower", data->client_min_proto_version, PGLOGICAL_PROTO_VERSION_NUM))); if (data->client_max_proto_version < PGLOGICAL_PROTO_MIN_VERSION_NUM) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("client sent max_proto_version=%d but we only support protocol %d or higher", data->client_max_proto_version, PGLOGICAL_PROTO_MIN_VERSION_NUM))); /* * Set correct protocol format. * * This is the output plugin protocol format, this is different * from the individual fields binary vs textual format. 
*/ if (data->client_protocol_format != NULL && strcmp(data->client_protocol_format, "json") == 0) { oldctx = MemoryContextSwitchTo(ctx->context); data->api = pglogical_init_api(PGLogicalProtoJson); opt->output_type = OUTPUT_PLUGIN_TEXTUAL_OUTPUT; MemoryContextSwitchTo(oldctx); } else if ((data->client_protocol_format != NULL && strcmp(data->client_protocol_format, "native") == 0) || data->client_protocol_format == NULL) { oldctx = MemoryContextSwitchTo(ctx->context); data->api = pglogical_init_api(PGLogicalProtoNative); opt->output_type = OUTPUT_PLUGIN_BINARY_OUTPUT; if (data->client_no_txinfo) { elog(WARNING, "no_txinfo option ignored for protocols other than json"); data->client_no_txinfo = false; } MemoryContextSwitchTo(oldctx); } else { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("client requested protocol %s but only \"json\" or \"native\" are supported", data->client_protocol_format))); } /* check for encoding match if specific encoding demanded by client */ if (data->client_expected_encoding != NULL && strlen(data->client_expected_encoding) != 0) { int wanted_encoding = pg_char_to_encoding(data->client_expected_encoding); if (wanted_encoding == -1) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("unrecognised encoding name %s passed to expected_encoding", data->client_expected_encoding))); if (opt->output_type == OUTPUT_PLUGIN_TEXTUAL_OUTPUT) { /* * datum encoding must match assigned client_encoding in text * proto, since everything is subject to client_encoding * conversion. */ if (wanted_encoding != pg_get_client_encoding()) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("expected_encoding must be unset or match client_encoding in text protocols"))); } else { /* * currently in the binary protocol we can only emit encoded * datums in the server encoding. There's no support for encoding * conversion. 
*/ if (wanted_encoding != GetDatabaseEncoding()) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("encoding conversion for binary datum not supported yet"), errdetail("expected_encoding %s must be unset or match server_encoding %s", data->client_expected_encoding, GetDatabaseEncodingName()))); } data->field_datum_encoding = wanted_encoding; } /* * It's obviously not possible to send binary representation of data * unless we use the binary output. */ if (opt->output_type == OUTPUT_PLUGIN_BINARY_OUTPUT && data->client_want_internal_basetypes) { data->allow_internal_basetypes = check_binary_compatibility(data); } if (opt->output_type == OUTPUT_PLUGIN_BINARY_OUTPUT && data->client_want_binary_basetypes && data->client_binary_basetypes_major_version == PG_VERSION_NUM / 100) { data->allow_binary_basetypes = true; } /* * 9.4 lacks origins info so don't forward it. * * There's currently no knob for clients to use to suppress * this info and it's sent if it's supported and available. 
*/ if (PG_VERSION_NUM/100 == 904) data->forward_changeset_origins = false; else data->forward_changeset_origins = true; if (started_tx) CommitTransactionCommand(); relmetacache_init(ctx->context); } /* So we can identify the process type in Valgrind logs */ VALGRIND_PRINTF("PGLOGICAL: pglogical worker output_plugin\n"); /* For incremental leak checking */ VALGRIND_DISABLE_ERROR_REPORTING; VALGRIND_DO_LEAK_CHECK; VALGRIND_ENABLE_ERROR_REPORTING; } /* * BEGIN callback */ static void pg_decode_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn) { PGLogicalOutputData* data = (PGLogicalOutputData*)ctx->output_plugin_private; bool send_replication_origin = data->forward_changeset_origins; MemoryContext old_ctx; old_ctx = MemoryContextSwitchTo(data->context); VALGRIND_DO_ADDED_LEAK_CHECK; if (!startup_message_sent) send_startup_message(ctx, data, false /* can't be last message */); #ifdef HAVE_REPLICATION_ORIGINS /* If the record didn't originate locally, send origin info */ send_replication_origin &= txn->origin_id != InvalidRepOriginId; #endif OutputPluginPrepareWrite(ctx, !send_replication_origin); data->api->write_begin(ctx->out, data, txn); #ifdef HAVE_REPLICATION_ORIGINS if (send_replication_origin) { char *origin; /* Message boundary */ OutputPluginWrite(ctx, false); OutputPluginPrepareWrite(ctx, true); /* * XXX: which behaviour we want here? 
* * Alternatives: * - don't send origin message if origin name not found * (that's what we do now) * - throw error - that will break replication, not good * - send some special "unknown" origin */ if (data->api->write_origin && replorigin_by_oid(txn->origin_id, true, &origin)) data->api->write_origin(ctx->out, origin, txn->origin_lsn); } #endif OutputPluginWrite(ctx, true); Assert(CurrentMemoryContext == data->context); MemoryContextSwitchTo(old_ctx); } /* * COMMIT callback */ static void pg_decode_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr commit_lsn) { PGLogicalOutputData* data = (PGLogicalOutputData*)ctx->output_plugin_private; MemoryContext old_ctx; old_ctx = MemoryContextSwitchTo(data->context); OutputPluginPrepareWrite(ctx, true); data->api->write_commit(ctx->out, data, txn, commit_lsn); OutputPluginWrite(ctx, true); /* * Now is a good time to get rid of invalidated relation * metadata entries since nothing will be referencing them * at the moment. */ relmetacache_prune(); Assert(CurrentMemoryContext == data->context); MemoryContextSwitchTo(old_ctx); MemoryContextReset(data->context); VALGRIND_DO_ADDED_LEAK_CHECK; } static bool pglogical_change_filter(PGLogicalOutputData *data, Relation relation, ReorderBufferChange *change, Bitmapset **att_list) { PGLogicalTableRepInfo *tblinfo; ListCell *lc; if (data->replicate_only_table) { /* * Special case - we are catching up just one table. 
* TODO: performance */ return strcmp(RelationGetRelationName(relation), data->replicate_only_table->relname) == 0 && RelationGetNamespace(relation) == get_namespace_oid(data->replicate_only_table->schemaname, true); } else if (RelationGetRelid(relation) == get_queue_table_oid()) { /* Special case - queue table */ if (change->action == REORDER_BUFFER_CHANGE_INSERT) { HeapTuple tup = &change->data.tp.newtuple->tuple; QueuedMessage *q; ListCell *qlc; LockRelation(relation, AccessShareLock); q = queued_message_from_tuple(tup); UnlockRelation(relation, AccessShareLock); /* * No replication set means global message, those are always * replicated. */ if (q->replication_sets == NULL) return true; foreach (qlc, q->replication_sets) { char *queue_set = (char *) lfirst(qlc); ListCell *plc; foreach (plc, data->replication_sets) { PGLogicalRepSet *rs = lfirst(plc); /* TODO: this is somewhat ugly. */ if (strcmp(queue_set, rs->name) == 0 && (q->message_type != QUEUE_COMMAND_TYPE_TRUNCATE || rs->replicate_truncate)) return true; } } } return false; } else if (RelationGetRelid(relation) == get_replication_set_rel_oid()) { /* * Special case - replication set table. * * We can use this to update our cached replication set info, without * having to deal with cache invalidation callbacks. */ HeapTuple tup; PGLogicalRepSet *replicated_set; ListCell *plc; if (change->action == REORDER_BUFFER_CHANGE_UPDATE) tup = &change->data.tp.newtuple->tuple; else if (change->action == REORDER_BUFFER_CHANGE_DELETE) tup = &change->data.tp.oldtuple->tuple; else return false; replicated_set = replication_set_from_tuple(tup); foreach (plc, data->replication_sets) { PGLogicalRepSet *rs = lfirst(plc); /* Check if the changed repset is used by us. */ if (rs->id == replicated_set->id) { /* * In case this was delete, somebody deleted one of our * rep sets, bail here and let reconnect logic handle any * potential issues. 
*/ if (change->action == REORDER_BUFFER_CHANGE_DELETE) elog(ERROR, "replication set \"%s\" used by this connection was deleted, existing", rs->name); /* This was update of our repset, update the cache. */ rs->replicate_insert = replicated_set->replicate_insert; rs->replicate_update = replicated_set->replicate_update; rs->replicate_delete = replicated_set->replicate_delete; rs->replicate_truncate = replicated_set->replicate_truncate; return false; } } return false; } /* Normal case - use replication set membership. */ tblinfo = get_table_replication_info(data->local_node_id, relation, data->replication_sets); /* First try filter out by change type. */ switch (change->action) { case REORDER_BUFFER_CHANGE_INSERT: if (!tblinfo->replicate_insert) return false; break; case REORDER_BUFFER_CHANGE_UPDATE: if (!tblinfo->replicate_update) return false; break; case REORDER_BUFFER_CHANGE_DELETE: if (!tblinfo->replicate_delete) return false; break; default: elog(ERROR, "Unhandled reorder buffer change type %d", change->action); return false; /* shut compiler up */ } /* * Proccess row filters. * XXX: we could probably cache some of the executor stuff. */ if (list_length(tblinfo->row_filter) > 0) { EState *estate; ExprContext *econtext; TupleDesc tupdesc = RelationGetDescr(relation); HeapTuple oldtup = change->data.tp.oldtuple ? &change->data.tp.oldtuple->tuple : NULL; HeapTuple newtup = change->data.tp.newtuple ? &change->data.tp.newtuple->tuple : NULL; /* Skip empty changes. */ if (!newtup && !oldtup) { elog(DEBUG1, "pglogical output got empty change"); return false; } PushActiveSnapshot(GetTransactionSnapshot()); estate = create_estate_for_relation(relation, false); econtext = prepare_per_tuple_econtext(estate, tupdesc); ExecStoreHeapTuple(newtup ? newtup : oldtup, econtext->ecxt_scantuple, false); /* Next try the row_filters if there are any. 
*/ foreach (lc, tblinfo->row_filter) { Node *row_filter = (Node *) lfirst(lc); ExprState *exprstate = pglogical_prepare_row_filter(row_filter); Datum res; bool isnull; res = ExecEvalExpr(exprstate, econtext, &isnull, NULL); /* NULL is same as false for our use. */ if (isnull) return false; if (!DatumGetBool(res)) return false; } ExecDropSingleTupleTableSlot(econtext->ecxt_scantuple); FreeExecutorState(estate); PopActiveSnapshot(); } /* Make sure caller is aware of any attribute filter. */ *att_list = tblinfo->att_list; return true; } static void pg_decode_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, Relation relation, ReorderBufferChange *change) { PGLogicalOutputData *data = ctx->output_plugin_private; MemoryContext old; Bitmapset *att_list = NULL; /* Avoid leaking memory by using and resetting our own context */ old = MemoryContextSwitchTo(data->context); /* First check the table filter */ if (!pglogical_change_filter(data, relation, change, &att_list)) return; /* * If the protocol wants to write relation information and the client * isn't known to have metadata cached for this relation already, * send relation metadata. * * TODO: track hit/miss stats */ if (data->api->write_rel != NULL) { PGLRelMetaCacheEntry *cached_relmeta; cached_relmeta = relmetacache_get_relation(data, relation); if (!cached_relmeta->is_cached) { OutputPluginPrepareWrite(ctx, false); data->api->write_rel(ctx->out, data, relation, att_list); OutputPluginWrite(ctx, false); cached_relmeta->is_cached = true; } } /* Send the data */ switch (change->action) { case REORDER_BUFFER_CHANGE_INSERT: OutputPluginPrepareWrite(ctx, true); data->api->write_insert(ctx->out, data, relation, &change->data.tp.newtuple->tuple, att_list); OutputPluginWrite(ctx, true); break; case REORDER_BUFFER_CHANGE_UPDATE: { HeapTuple oldtuple = change->data.tp.oldtuple ? 
#ifdef HAVE_REPLICATION_ORIGINS
/*
 * Decide if the whole transaction with specific origin should be filtered out.
 */
static bool
pg_decode_origin_filter(LogicalDecodingContext *ctx,
						RepOriginId origin_id)
{
	PGLogicalOutputData *data = ctx->output_plugin_private;

	/* Never filter out locally originated tx's */
	if (origin_id == InvalidRepOriginId)
		return false;

	/*
	 * Otherwise, ignore the origin passed in txnfilter_args->origin_id,
	 * and just forward all or nothing based on the configuration option
	 * 'forward_origins'.
	 */
	return list_length(data->forward_origins) == 0;
}
#endif
/*
 * Relation metadata invalidation, for when a relcache invalidation
 * means that we need to resend table metadata to the client.
 */
static void
relmetacache_invalidation_cb(Datum arg, Oid relid)
{
	struct PGLRelMetaCacheEntry *entry;

	Assert(RelMetaCache != NULL);

	/*
	 * Nobody keeps pointers to entries in this hash table around outside
	 * logical decoding callback calls - but invalidation events can come in
	 * *during* a callback if we access the relcache in the callback. Because
	 * of that we must mark the cache entry as invalid but not remove it from
	 * the hash while it could still be referenced, then prune it at a later
	 * safe point.
	 *
	 * Getting invalidations for relations that aren't in the table is
	 * entirely normal, since there's no way to unregister for an
	 * invalidation event. So we don't care if it's found or not.
	 */
	entry = (struct PGLRelMetaCacheEntry *)
		hash_search(RelMetaCache, &relid, HASH_FIND, NULL);

	if (entry == NULL)
		return;					/* relation was never cached; nothing to do */

	entry->is_valid = false;
	InvalidRelMetaCacheCnt++;
}
*/ static void relmetacache_init(MemoryContext decoding_context) { HASHCTL ctl; int hash_flags; InvalidRelMetaCacheCnt = 0; if (RelMetaCache == NULL) { MemoryContext old_ctxt; RelMetaCacheContext = AllocSetContextCreate(TopMemoryContext, "pglogical output relmetacache", ALLOCSET_DEFAULT_SIZES); /* Make a new hash table for the cache */ hash_flags = HASH_ELEM | HASH_CONTEXT; MemSet(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(struct PGLRelMetaCacheEntry); ctl.hcxt = RelMetaCacheContext; #if PG_VERSION_NUM >= 90500 hash_flags |= HASH_BLOBS; #else ctl.hash = tag_hash; hash_flags |= HASH_FUNCTION; #endif old_ctxt = MemoryContextSwitchTo(RelMetaCacheContext); RelMetaCache = hash_create("pglogical relation metadata cache", RELMETACACHE_INITIAL_SIZE, &ctl, hash_flags); (void) MemoryContextSwitchTo(old_ctxt); Assert(RelMetaCache != NULL); CacheRegisterRelcacheCallback(relmetacache_invalidation_cb, (Datum)0); } } /* * Look up an entry, creating it if not found. * * Newly created entries are returned as is_cached=false. The API * hook can set is_cached to skip subsequent updates if it sent a * complete response that the client will cache. * * Returns true on a cache hit, false on a miss. */ static PGLRelMetaCacheEntry * relmetacache_get_relation(struct PGLogicalOutputData *data, Relation rel) { struct PGLRelMetaCacheEntry *hentry; bool found; MemoryContext old_mctx; /* Find cached function info, creating if not found */ old_mctx = MemoryContextSwitchTo(RelMetaCacheContext); hentry = (struct PGLRelMetaCacheEntry*) hash_search(RelMetaCache, (void *)(&RelationGetRelid(rel)), HASH_ENTER, &found); (void) MemoryContextSwitchTo(old_mctx); /* If not found or not valid, it can't be cached. 
*/ if (!found || !hentry->is_valid) { Assert(hentry->relid = RelationGetRelid(rel)); hentry->is_cached = false; /* Only used for lazy purging of invalidations */ hentry->is_valid = true; } Assert(hentry != NULL); return hentry; } /* * Flush the relation metadata cache at the end of a decoding session. * * We cannot truly destroy the cache because it may be referenced by later * relcache invalidation callbacks after the end of a SQL-level decoding * session. */ static void relmetacache_flush(void) { HASH_SEQ_STATUS status; struct PGLRelMetaCacheEntry *hentry; if (RelMetaCache != NULL) { hash_seq_init(&status, RelMetaCache); while ((hentry = (struct PGLRelMetaCacheEntry*) hash_seq_search(&status)) != NULL) { if (hash_search(RelMetaCache, (void *) &hentry->relid, HASH_REMOVE, NULL) == NULL) elog(ERROR, "hash table corrupted"); } } } /* * Prune !is_valid entries from the relation metadata cache * * This must only be called when there couldn't be any references to * possibly-invalid entries. */ static void relmetacache_prune(void) { HASH_SEQ_STATUS status; struct PGLRelMetaCacheEntry *hentry; /* * Since the pruning can be expensive, do it only if ig we invalidated * at least half of initial cache size. 
/*
 * Clone of ReorderBufferCleanSerializedTXNs; see
 * https://www.postgresql.org/message-id/CAMsr+YHdX=XECbZshDZ2CZNWGTyw-taYBnzqVfx4JzM4ExP5xg@mail.gmail.com
 *
 * Removes any xid* spill files left under pg_replslot/<slotname>/ by a
 * previous decoding session, defending against the reorder-buffer
 * duplication bug described in the thread above.
 */
static void
pglReorderBufferCleanSerializedTXNs(const char *slotname)
{
	DIR		   *spill_dir;
	struct dirent *spill_de;
	struct stat statbuf;
	char		path[MAXPGPATH * 2 + 12];

	sprintf(path, "pg_replslot/%s", slotname);

	/* we're only handling directories here, skip if it's not ours */
	if (lstat(path, &statbuf) == 0 && !S_ISDIR(statbuf.st_mode))
		return;

	spill_dir = AllocateDir(path);
	while ((spill_de = ReadDirExtended(spill_dir, path, INFO)) != NULL)
	{
		/* only look at names that can be ours */
		if (strncmp(spill_de->d_name, "xid", 3) == 0)
		{
			/* reuse 'path' for the full file path inside the slot dir */
			snprintf(path, sizeof(path), "pg_replslot/%s/%s", slotname,
					 spill_de->d_name);

			if (unlink(path) != 0)
				ereport(ERROR,
						(errcode_for_file_access(),
						 errmsg("could not remove file \"%s\" during removal of pg_replslot/%s/*.xid: %m",
								path, slotname)));
		}
	}
	FreeDir(spill_dir);
}
typedef struct PGLogicalOutputData
{
	/* Short-lived per-message memory context, reset after each change */
	MemoryContext context;

	/* Protocol implementation callbacks (native or json) */
	struct PGLogicalProtoAPI *api;

	/* Cached node id */
	Oid			local_node_id;

	/* protocol */
	bool		allow_internal_basetypes;
	bool		allow_binary_basetypes;
	bool		forward_changeset_origins;
	/* Encoding used for datum text output; see expected_encoding param */
	int			field_datum_encoding;

	/*
	 * client info
	 *
	 * Lots of this should move to a separate shorter-lived struct used only
	 * during parameter reading, since it contains what the client asked for.
	 * Once we've processed this during startup we don't refer to it again.
	 */
	uint32		client_pg_version;
	uint32		client_max_proto_version;
	uint32		client_min_proto_version;
	const char *client_expected_encoding;
	const char *client_protocol_format;
	uint32		client_binary_basetypes_major_version;
	/* *_set flags record whether the client supplied the matching option */
	bool		client_want_internal_basetypes_set;
	bool		client_want_internal_basetypes;
	bool		client_want_binary_basetypes_set;
	bool		client_want_binary_basetypes;
	bool		client_binary_bigendian_set;
	bool		client_binary_bigendian;
	uint32		client_binary_sizeofdatum;
	uint32		client_binary_sizeofint;
	uint32		client_binary_sizeoflong;
	bool		client_binary_float4byval_set;
	bool		client_binary_float4byval;
	bool		client_binary_float8byval_set;
	bool		client_binary_float8byval;
	bool		client_binary_intdatetimes_set;
	bool		client_binary_intdatetimes;
	bool		client_no_txinfo;

	/* List of origin names */
	List	   *forward_origins;
	/* List of PGLogicalRepSet */
	List	   *replication_sets;
	/* Non-NULL when resynchronizing a single table; see change filter */
	RangeVar   *replicate_only_table;
} PGLogicalOutputData;
"pglogical_output_plugin.h" #include "pglogical_output_proto.h" #include "pglogical_proto_native.h" #include "pglogical_proto_json.h" PGLogicalProtoAPI * pglogical_init_api(PGLogicalProtoType typ) { PGLogicalProtoAPI *res = palloc0(sizeof(PGLogicalProtoAPI)); if (typ == PGLogicalProtoJson) { res->write_rel = NULL; res->write_begin = pglogical_json_write_begin; res->write_commit = pglogical_json_write_commit; res->write_origin = NULL; res->write_insert = pglogical_json_write_insert; res->write_update = pglogical_json_write_update; res->write_delete = pglogical_json_write_delete; res->write_startup_message = json_write_startup_message; } else { res->write_rel = pglogical_write_rel; res->write_begin = pglogical_write_begin; res->write_commit = pglogical_write_commit; res->write_origin = pglogical_write_origin; res->write_insert = pglogical_write_insert; res->write_update = pglogical_write_update; res->write_delete = pglogical_write_delete; res->write_startup_message = write_startup_message; } return res; } pglogical-REL2_4_1/pglogical_output_proto.h000066400000000000000000000067761415142317000211570ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_output_proto.h * pglogical protocol * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_output_proto.h * *------------------------------------------------------------------------- */ #ifndef PG_LOGICAL_OUTPUT_PROTO_H #define PG_LOGICAL_OUTPUT_PROTO_H #include "lib/stringinfo.h" #include "replication/reorderbuffer.h" #include "utils/relcache.h" #include "pglogical_output_plugin.h" /* * Protocol capabilities * * PGLOGICAL_PROTO_VERSION_NUM is our native protocol and the greatest version * we can support. PGLOGICAL_PROTO_MIN_VERSION_NUM is the oldest version we * have backwards compatibility for. We negotiate protocol versions during the * startup handshake. See the protocol documentation for details. 
*/ #define PGLOGICAL_PROTO_VERSION_NUM 1 #define PGLOGICAL_PROTO_MIN_VERSION_NUM 1 /* * The startup parameter format is versioned separately to the rest of the wire * protocol because we negotiate the wire protocol version using the startup * parameters sent to us. It hopefully won't ever need to change, but this * field is present in case we do need to change it, e.g. to a structured json * object. We can look at the startup params version to see whether we can * understand the startup params sent by the client and to fall back to * reading an older format if needed. */ #define PGLOGICAL_STARTUP_PARAM_FORMAT_FLAT 1 /* * For similar reasons to the startup params * (PGLOGICAL_STARTUP_PARAM_FORMAT_FLAT) the startup reply message format is * versioned separately to the rest of the protocol. The client has to be able * to read it to find out what protocol version was selected by the upstream * when using the native protocol. */ #define PGLOGICAL_STARTUP_MSG_FORMAT_FLAT 1 typedef enum PGLogicalProtoType { PGLogicalProtoNative, PGLogicalProtoJson } PGLogicalProtoType; typedef void (*pglogical_write_rel_fn) (StringInfo out, PGLogicalOutputData * data, Relation rel, Bitmapset *att_list); typedef void (*pglogical_write_begin_fn) (StringInfo out, PGLogicalOutputData * data, ReorderBufferTXN *txn); typedef void (*pglogical_write_commit_fn) (StringInfo out, PGLogicalOutputData * data, ReorderBufferTXN *txn, XLogRecPtr commit_lsn); typedef void (*pglogical_write_origin_fn) (StringInfo out, const char *origin, XLogRecPtr origin_lsn); typedef void (*pglogical_write_insert_fn) (StringInfo out, PGLogicalOutputData * data, Relation rel, HeapTuple newtuple, Bitmapset *att_list); typedef void (*pglogical_write_update_fn) (StringInfo out, PGLogicalOutputData * data, Relation rel, HeapTuple oldtuple, HeapTuple newtuple, Bitmapset *att_list); typedef void (*pglogical_write_delete_fn) (StringInfo out, PGLogicalOutputData * data, Relation rel, HeapTuple oldtuple, Bitmapset *att_list); 
typedef void (*write_startup_message_fn) (StringInfo out, List *msg); typedef struct PGLogicalProtoAPI { pglogical_write_rel_fn write_rel; pglogical_write_begin_fn write_begin; pglogical_write_commit_fn write_commit; pglogical_write_origin_fn write_origin; pglogical_write_insert_fn write_insert; pglogical_write_update_fn write_update; pglogical_write_delete_fn write_delete; write_startup_message_fn write_startup_message; } PGLogicalProtoAPI; extern PGLogicalProtoAPI *pglogical_init_api(PGLogicalProtoType typ); #endif /* PG_LOGICAL_OUTPUT_PROTO_H */ pglogical-REL2_4_1/pglogical_proto_json.c000066400000000000000000000427161415142317000205550ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_proto_json.c * pglogical protocol functions for json support * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_proto_json.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "access/sysattr.h" #include "catalog/index.h" #include "catalog/namespace.h" #include "catalog/pg_class.h" #include "catalog/pg_database.h" #include "catalog/pg_namespace.h" #include "catalog/pg_type.h" #include "commands/dbcommands.h" #include "executor/spi.h" #include "libpq/pqformat.h" #include "mb/pg_wchar.h" #include "miscadmin.h" #include "replication/reorderbuffer.h" #include "utils/builtins.h" #include "utils/json.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/rel.h" #include "utils/syscache.h" #include "utils/timestamp.h" #include "utils/typcache.h" #include "pglogical_output_plugin.h" #include "pglogical_proto_json.h" #ifdef HAVE_REPLICATION_ORIGINS #include "replication/origin.h" #endif static void json_write_tuple(StringInfo out, Relation rel, HeapTuple tuple, Bitmapset *att_list); /* * Write BEGIN to the output stream. 
*/ void pglogical_json_write_begin(StringInfo out, PGLogicalOutputData *data, ReorderBufferTXN *txn) { appendStringInfoChar(out, '{'); appendStringInfoString(out, "\"action\":\"B\""); appendStringInfo(out, ", \"has_catalog_changes\":\"%c\"", rbtxn_has_catalog_changes(txn) ? 't' : 'f'); #ifdef HAVE_REPLICATION_ORIGINS if (txn->origin_id != InvalidRepOriginId) appendStringInfo(out, ", \"origin_id\":\"%u\"", txn->origin_id); #endif if (!data->client_no_txinfo) { appendStringInfo(out, ", \"xid\":\"%u\"", txn->xid); appendStringInfo(out, ", \"first_lsn\":\"%X/%X\"", (uint32)(txn->first_lsn >> 32), (uint32)(txn->first_lsn)); #ifdef HAVE_REPLICATION_ORIGINS appendStringInfo(out, ", \"origin_lsn\":\"%X/%X\"", (uint32)(txn->origin_lsn >> 32), (uint32)(txn->origin_lsn)); #endif if (txn->commit_time != 0) appendStringInfo(out, ", \"commit_time\":\"%s\"", timestamptz_to_str(txn->commit_time)); } appendStringInfoChar(out, '}'); } /* * Write COMMIT to the output stream. */ void pglogical_json_write_commit(StringInfo out, PGLogicalOutputData *data, ReorderBufferTXN *txn, XLogRecPtr commit_lsn) { appendStringInfoChar(out, '{'); appendStringInfoString(out, "\"action\":\"C\""); if (!data->client_no_txinfo) { appendStringInfo(out, ", \"final_lsn\":\"%X/%X\"", (uint32)(txn->final_lsn >> 32), (uint32)(txn->final_lsn)); appendStringInfo(out, ", \"end_lsn\":\"%X/%X\"", (uint32)(txn->end_lsn >> 32), (uint32)(txn->end_lsn)); } appendStringInfoChar(out, '}'); } /* * Write change. * * Generic function handling DML changes. 
*/ static void pglogical_json_write_change(StringInfo out, const char *change, Relation rel, HeapTuple oldtuple, HeapTuple newtuple, Bitmapset *att_list) { appendStringInfoChar(out, '{'); appendStringInfo(out, "\"action\":\"%s\",\"relation\":[\"%s\",\"%s\"]", change, get_namespace_name(RelationGetNamespace(rel)), RelationGetRelationName(rel)); if (oldtuple) { appendStringInfoString(out, ",\"oldtuple\":"); json_write_tuple(out, rel, oldtuple, att_list); } if (newtuple) { appendStringInfoString(out, ",\"newtuple\":"); json_write_tuple(out, rel, newtuple, att_list); } appendStringInfoChar(out, '}'); } /* * Write INSERT to the output stream. */ void pglogical_json_write_insert(StringInfo out, PGLogicalOutputData *data, Relation rel, HeapTuple newtuple, Bitmapset *att_list) { pglogical_json_write_change(out, "I", rel, NULL, newtuple, att_list); } /* * Write UPDATE to the output stream. */ void pglogical_json_write_update(StringInfo out, PGLogicalOutputData *data, Relation rel, HeapTuple oldtuple, HeapTuple newtuple, Bitmapset *att_list) { pglogical_json_write_change(out, "U", rel, oldtuple, newtuple, att_list); } /* * Write DELETE to the output stream. */ void pglogical_json_write_delete(StringInfo out, PGLogicalOutputData *data, Relation rel, HeapTuple oldtuple, Bitmapset *att_list) { pglogical_json_write_change(out, "D", rel, oldtuple, NULL, att_list); } /* * The startup message should be constructed as a json object, one * key/value per DefElem list member. 
*/ void json_write_startup_message(StringInfo out, List *msg) { ListCell *lc; bool first = true; appendStringInfoString(out, "{\"action\":\"S\", \"params\": {"); foreach (lc, msg) { DefElem *param = (DefElem*)lfirst(lc); Assert(IsA(param->arg, String) && strVal(param->arg) != NULL); if (first) first = false; else appendStringInfoChar(out, ','); escape_json(out, param->defname); appendStringInfoChar(out, ':'); escape_json(out, strVal(param->arg)); } appendStringInfoString(out, "}}"); } /* * Functions taken from json.c * * Current as of commit 272adf4f9cd67df323ae57ff3dee238b649d3b73 */ #include "parser/parse_coerce.h" #include "utils/date.h" #include "utils/datetime.h" #if PG_VERSION_NUM >= 130000 #include "common/jsonapi.h" #else #include "utils/jsonapi.h" #endif /* * Determine how we want to print values of a given type in datum_to_json. * * Given the datatype OID, return its JsonTypeCategory, as well as the type's * output function OID. If the returned category is JSONTYPE_CAST, we * return the OID of the type->JSON cast function instead. 
*/ typedef enum /* type categories for datum_to_json */ { JSONTYPE_NULL, /* null, so we didn't bother to identify */ JSONTYPE_BOOL, /* boolean (built-in types only) */ JSONTYPE_NUMERIC, /* numeric (ditto) */ JSONTYPE_DATE, /* we use special formatting for datetimes */ JSONTYPE_TIMESTAMP, JSONTYPE_TIMESTAMPTZ, JSONTYPE_JSON, /* JSON itself (and JSONB) */ JSONTYPE_ARRAY, /* array */ JSONTYPE_COMPOSITE, /* composite */ JSONTYPE_CAST, /* something with an explicit cast to JSON */ JSONTYPE_OTHER /* all else */ } JsonTypeCategory; static void composite_to_json(Datum composite, StringInfo result, bool use_line_feeds); static void array_dim_to_json(StringInfo result, int dim, int ndims, int *dims, Datum *vals, bool *nulls, int *valcount, JsonTypeCategory tcategory, Oid outfuncoid, bool use_line_feeds); static void array_to_json_internal(Datum array, StringInfo result, bool use_line_feeds); static void json_categorize_type(Oid typoid, JsonTypeCategory *tcategory, Oid *outfuncoid); static void json_categorize_type(Oid typoid, JsonTypeCategory *tcategory, Oid *outfuncoid) { bool typisvarlena; /* Look through any domain */ typoid = getBaseType(typoid); *outfuncoid = InvalidOid; /* * We need to get the output function for everything except date and * timestamp types, array and composite types, booleans, and non-builtin * types where there's a cast to json. 
*/ switch (typoid) { case BOOLOID: *tcategory = JSONTYPE_BOOL; break; case INT2OID: case INT4OID: case INT8OID: case FLOAT4OID: case FLOAT8OID: case NUMERICOID: getTypeOutputInfo(typoid, outfuncoid, &typisvarlena); *tcategory = JSONTYPE_NUMERIC; break; case DATEOID: *tcategory = JSONTYPE_DATE; break; case TIMESTAMPOID: *tcategory = JSONTYPE_TIMESTAMP; break; case TIMESTAMPTZOID: *tcategory = JSONTYPE_TIMESTAMPTZ; break; case JSONOID: case JSONBOID: getTypeOutputInfo(typoid, outfuncoid, &typisvarlena); *tcategory = JSONTYPE_JSON; break; default: /* Check for arrays and composites */ if (OidIsValid(get_element_type(typoid)) || typoid == ANYARRAYOID || typoid == RECORDARRAYOID) *tcategory = JSONTYPE_ARRAY; else if (type_is_rowtype(typoid)) /* includes RECORDOID */ *tcategory = JSONTYPE_COMPOSITE; else { /* It's probably the general case ... */ *tcategory = JSONTYPE_OTHER; /* but let's look for a cast to json, if it's not built-in */ if (typoid >= FirstNormalObjectId) { Oid castfunc; CoercionPathType ctype; ctype = find_coercion_pathway(JSONOID, typoid, COERCION_EXPLICIT, &castfunc); if (ctype == COERCION_PATH_FUNC && OidIsValid(castfunc)) { *tcategory = JSONTYPE_CAST; *outfuncoid = castfunc; } else { /* non builtin type with no cast */ getTypeOutputInfo(typoid, outfuncoid, &typisvarlena); } } else { /* any other builtin type */ getTypeOutputInfo(typoid, outfuncoid, &typisvarlena); } } break; } } /* * Turn a Datum into JSON text, appending the string to "result". * * tcategory and outfuncoid are from a previous call to json_categorize_type, * except that if is_null is true then they can be invalid. * * If key_scalar is true, the value is being printed as a key, so insist * it's of an acceptable type, and force it to be quoted. 
*/ static void datum_to_json(Datum val, bool is_null, StringInfo result, JsonTypeCategory tcategory, Oid outfuncoid, bool key_scalar) { char *outputstr; text *jsontext; check_stack_depth(); /* callers are expected to ensure that null keys are not passed in */ Assert(!(key_scalar && is_null)); if (is_null) { appendStringInfoString(result, "null"); return; } if (key_scalar && (tcategory == JSONTYPE_ARRAY || tcategory == JSONTYPE_COMPOSITE || tcategory == JSONTYPE_JSON || tcategory == JSONTYPE_CAST)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("key value must be scalar, not array, composite, or json"))); switch (tcategory) { case JSONTYPE_ARRAY: array_to_json_internal(val, result, false); break; case JSONTYPE_COMPOSITE: composite_to_json(val, result, false); break; case JSONTYPE_BOOL: outputstr = DatumGetBool(val) ? "true" : "false"; if (key_scalar) escape_json(result, outputstr); else appendStringInfoString(result, outputstr); break; case JSONTYPE_NUMERIC: outputstr = OidOutputFunctionCall(outfuncoid, val); /* * Don't call escape_json for a non-key if it's a valid JSON * number. 
*/ if (!key_scalar && IsValidJsonNumber(outputstr, strlen(outputstr))) appendStringInfoString(result, outputstr); else escape_json(result, outputstr); pfree(outputstr); break; case JSONTYPE_DATE: { DateADT date; struct pg_tm tm; char buf[MAXDATELEN + 1]; date = DatumGetDateADT(val); /* Same as date_out(), but forcing DateStyle */ if (DATE_NOT_FINITE(date)) EncodeSpecialDate(date, buf); else { j2date(date + POSTGRES_EPOCH_JDATE, &(tm.tm_year), &(tm.tm_mon), &(tm.tm_mday)); EncodeDateOnly(&tm, USE_XSD_DATES, buf); } appendStringInfo(result, "\"%s\"", buf); } break; case JSONTYPE_TIMESTAMP: { Timestamp timestamp; struct pg_tm tm; fsec_t fsec; char buf[MAXDATELEN + 1]; timestamp = DatumGetTimestamp(val); /* Same as timestamp_out(), but forcing DateStyle */ if (TIMESTAMP_NOT_FINITE(timestamp)) EncodeSpecialTimestamp(timestamp, buf); else if (timestamp2tm(timestamp, NULL, &tm, &fsec, NULL, NULL) == 0) EncodeDateTime(&tm, fsec, false, 0, NULL, USE_XSD_DATES, buf); else ereport(ERROR, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); appendStringInfo(result, "\"%s\"", buf); } break; case JSONTYPE_TIMESTAMPTZ: { TimestampTz timestamp; struct pg_tm tm; int tz; fsec_t fsec; const char *tzn = NULL; char buf[MAXDATELEN + 1]; timestamp = DatumGetTimestampTz(val); /* Same as timestamptz_out(), but forcing DateStyle */ if (TIMESTAMP_NOT_FINITE(timestamp)) EncodeSpecialTimestamp(timestamp, buf); else if (timestamp2tm(timestamp, &tz, &tm, &fsec, &tzn, NULL) == 0) EncodeDateTime(&tm, fsec, true, tz, tzn, USE_XSD_DATES, buf); else ereport(ERROR, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); appendStringInfo(result, "\"%s\"", buf); } break; case JSONTYPE_JSON: /* JSON and JSONB output will already be escaped */ outputstr = OidOutputFunctionCall(outfuncoid, val); appendStringInfoString(result, outputstr); pfree(outputstr); break; case JSONTYPE_CAST: /* outfuncoid refers to a cast function, not an output function */ 
jsontext = DatumGetTextP(OidFunctionCall1(outfuncoid, val)); outputstr = text_to_cstring(jsontext); appendStringInfoString(result, outputstr); pfree(outputstr); pfree(jsontext); break; default: outputstr = OidOutputFunctionCall(outfuncoid, val); escape_json(result, outputstr); pfree(outputstr); break; } } /* * Process a single dimension of an array. * If it's the innermost dimension, output the values, otherwise call * ourselves recursively to process the next dimension. */ static void array_dim_to_json(StringInfo result, int dim, int ndims, int *dims, Datum *vals, bool *nulls, int *valcount, JsonTypeCategory tcategory, Oid outfuncoid, bool use_line_feeds) { int i; const char *sep; Assert(dim < ndims); sep = use_line_feeds ? ",\n " : ","; appendStringInfoChar(result, '['); for (i = 1; i <= dims[dim]; i++) { if (i > 1) appendStringInfoString(result, sep); if (dim + 1 == ndims) { datum_to_json(vals[*valcount], nulls[*valcount], result, tcategory, outfuncoid, false); (*valcount)++; } else { /* * Do we want line feeds on inner dimensions of arrays? For now * we'll say no. */ array_dim_to_json(result, dim + 1, ndims, dims, vals, nulls, valcount, tcategory, outfuncoid, false); } } appendStringInfoChar(result, ']'); } /* * Turn an array into JSON. 
*/ static void array_to_json_internal(Datum array, StringInfo result, bool use_line_feeds) { ArrayType *v = DatumGetArrayTypeP(array); Oid element_type = ARR_ELEMTYPE(v); int *dim; int ndim; int nitems; int count = 0; Datum *elements; bool *nulls; int16 typlen; bool typbyval; char typalign; JsonTypeCategory tcategory; Oid outfuncoid; ndim = ARR_NDIM(v); dim = ARR_DIMS(v); nitems = ArrayGetNItems(ndim, dim); if (nitems <= 0) { appendStringInfoString(result, "[]"); return; } get_typlenbyvalalign(element_type, &typlen, &typbyval, &typalign); json_categorize_type(element_type, &tcategory, &outfuncoid); deconstruct_array(v, element_type, typlen, typbyval, typalign, &elements, &nulls, &nitems); array_dim_to_json(result, 0, ndim, dim, elements, nulls, &count, tcategory, outfuncoid, use_line_feeds); pfree(elements); pfree(nulls); } /* * Turn a composite / record into JSON. */ static void composite_to_json(Datum composite, StringInfo result, bool use_line_feeds) { HeapTupleHeader td; Oid tupType; int32 tupTypmod; TupleDesc tupdesc; HeapTupleData tmptup, *tuple; int i; bool needsep = false; const char *sep; sep = use_line_feeds ? 
",\n " : ","; td = DatumGetHeapTupleHeader(composite); /* Extract rowtype info and find a tupdesc */ tupType = HeapTupleHeaderGetTypeId(td); tupTypmod = HeapTupleHeaderGetTypMod(td); tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod); /* Build a temporary HeapTuple control structure */ tmptup.t_len = HeapTupleHeaderGetDatumLength(td); tmptup.t_data = td; tuple = &tmptup; appendStringInfoChar(result, '{'); for (i = 0; i < tupdesc->natts; i++) { Datum val; bool isnull; char *attname; JsonTypeCategory tcategory; Oid outfuncoid; if (TupleDescAttr(tupdesc,i)->attisdropped) continue; if (needsep) appendStringInfoString(result, sep); needsep = true; attname = NameStr(TupleDescAttr(tupdesc,i)->attname); escape_json(result, attname); appendStringInfoChar(result, ':'); val = heap_getattr(tuple, i + 1, tupdesc, &isnull); if (isnull) { tcategory = JSONTYPE_NULL; outfuncoid = InvalidOid; } else json_categorize_type(TupleDescAttr(tupdesc,i)->atttypid, &tcategory, &outfuncoid); datum_to_json(val, isnull, result, tcategory, outfuncoid, false); } appendStringInfoChar(result, '}'); ReleaseTupleDesc(tupdesc); } /* * And finally the function that uses the above json functions. */ /* * Write a tuple to the outputstream, in the most efficient format possible. */ static void json_write_tuple(StringInfo out, Relation rel, HeapTuple tuple, Bitmapset *att_list) { TupleDesc tupdesc; int i; bool needsep = false; Datum values[MaxTupleAttributeNumber]; bool isnull[MaxTupleAttributeNumber]; tupdesc = RelationGetDescr(rel); appendStringInfoChar(out, '{'); heap_deform_tuple(tuple, tupdesc, values, isnull); for (i = 0; i < tupdesc->natts; i++) { Form_pg_attribute att = TupleDescAttr(tupdesc,i); JsonTypeCategory tcategory; Oid outfuncoid; /* skip dropped columns */ if (att->attisdropped) continue; if (att_list && !bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber, att_list)) continue; /* * Don't send unchanged toast column as we may not be able to fetch * them. 
*/ if (!isnull[i] && att->attlen == -1 && VARATT_IS_EXTERNAL_ONDISK(values[i])) continue; if (needsep) appendStringInfoChar(out, ','); needsep = true; escape_json(out, NameStr(att->attname)); appendStringInfoChar(out, ':'); if (isnull[i]) { tcategory = JSONTYPE_NULL; outfuncoid = InvalidOid; } else json_categorize_type(att->atttypid, &tcategory, &outfuncoid); datum_to_json(values[i], isnull[i], out, tcategory, outfuncoid, false); } appendStringInfoChar(out, '}'); } pglogical-REL2_4_1/pglogical_proto_json.h000066400000000000000000000025561415142317000205600ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_proto_json.h * pglogical protocol, json implementation * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_proto_json.h * *------------------------------------------------------------------------- */ #ifndef PG_LOGICAL_PROTO_JSON_H #define PG_LOGICAL_PROTO_JSON_H #include "pglogical_output_plugin.h" #include "lib/stringinfo.h" #include "nodes/pg_list.h" #include "pglogical_output_proto.h" extern void pglogical_json_write_begin(StringInfo out, PGLogicalOutputData *data, ReorderBufferTXN *txn); extern void pglogical_json_write_commit(StringInfo out, PGLogicalOutputData *data, ReorderBufferTXN *txn, XLogRecPtr commit_lsn); extern void pglogical_json_write_insert(StringInfo out, PGLogicalOutputData *data, Relation rel, HeapTuple newtuple, Bitmapset *att_list); extern void pglogical_json_write_update(StringInfo out, PGLogicalOutputData *data, Relation rel, HeapTuple oldtuple, HeapTuple newtuple, Bitmapset *att_list); extern void pglogical_json_write_delete(StringInfo out, PGLogicalOutputData *data, Relation rel, HeapTuple oldtuple, Bitmapset *att_list); extern void json_write_startup_message(StringInfo out, List *msg); #endif /* PG_LOGICAL_PROTO_JSON_H */ 
pglogical-REL2_4_1/pglogical_proto_native.c000066400000000000000000000515621415142317000210710ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_proto_native.c * pglogical binary protocol functions * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_proto_native.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "access/sysattr.h" #if PG_VERSION_NUM >= 130000 #include "access/detoast.h" #else #include "access/tuptoaster.h" #endif #include "catalog/pg_type.h" #include "libpq/pqformat.h" #include "nodes/parsenodes.h" #include "replication/reorderbuffer.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/syscache.h" #include "pglogical_output_plugin.h" #include "pglogical_output_proto.h" #include "pglogical_proto_native.h" #define IS_REPLICA_IDENTITY 1 static void pglogical_write_attrs(StringInfo out, Relation rel, Bitmapset *att_list); static void pglogical_write_tuple(StringInfo out, PGLogicalOutputData *data, Relation rel, HeapTuple tuple, Bitmapset *att_list); static char decide_datum_transfer(Form_pg_attribute att, Form_pg_type typclass, bool allow_internal_basetypes, bool allow_binary_basetypes); static void pglogical_read_attrs(StringInfo in, char ***attrnames, int *nattrnames); static void pglogical_read_tuple(StringInfo in, PGLogicalRelation *rel, PGLogicalTupleData *tuple); /* * Write functions */ /* * Write relation description to the output stream. 
*/ void pglogical_write_rel(StringInfo out, PGLogicalOutputData *data, Relation rel, Bitmapset *att_list) { char *nspname; uint8 nspnamelen; const char *relname; uint8 relnamelen; uint8 flags = 0; pq_sendbyte(out, 'R'); /* sending RELATION */ /* send the flags field */ pq_sendbyte(out, flags); /* use Oid as relation identifier */ pq_sendint(out, RelationGetRelid(rel), 4); nspname = get_namespace_name(rel->rd_rel->relnamespace); if (nspname == NULL) elog(ERROR, "cache lookup failed for namespace %u", rel->rd_rel->relnamespace); nspnamelen = strlen(nspname) + 1; relname = NameStr(rel->rd_rel->relname); relnamelen = strlen(relname) + 1; pq_sendbyte(out, nspnamelen); /* schema name length */ pq_sendbytes(out, nspname, nspnamelen); pq_sendbyte(out, relnamelen); /* table name length */ pq_sendbytes(out, relname, relnamelen); /* send the attribute info */ pglogical_write_attrs(out, rel, att_list); pfree(nspname); } /* * Write relation attributes to the outputstream. */ static void pglogical_write_attrs(StringInfo out, Relation rel, Bitmapset *att_list) { TupleDesc desc; int i; uint16 nliveatts = 0; Bitmapset *idattrs; desc = RelationGetDescr(rel); pq_sendbyte(out, 'A'); /* sending ATTRS */ /* send number of live attributes */ for (i = 0; i < desc->natts; i++) { Form_pg_attribute att = TupleDescAttr(desc,i); if (att->attisdropped) continue; if (att_list && !bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber, att_list)) continue; nliveatts++; } pq_sendint(out, nliveatts, 2); /* fetch bitmap of REPLICATION IDENTITY attributes */ idattrs = RelationGetIndexAttrBitmap(rel, INDEX_ATTR_BITMAP_IDENTITY_KEY); /* send the attributes */ for (i = 0; i < desc->natts; i++) { Form_pg_attribute att = TupleDescAttr(desc,i); uint8 flags = 0; uint16 len; const char *attname; if (att->attisdropped) continue; if (att_list && !bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber, att_list)) continue; if (bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber, 
idattrs)) flags |= IS_REPLICA_IDENTITY; pq_sendbyte(out, 'C'); /* column definition follows */ pq_sendbyte(out, flags); pq_sendbyte(out, 'N'); /* column name block follows */ attname = NameStr(att->attname); len = strlen(attname) + 1; pq_sendint(out, len, 2); pq_sendbytes(out, attname, len); /* data */ } bms_free(idattrs); } /* * Write BEGIN to the output stream. */ void pglogical_write_begin(StringInfo out, PGLogicalOutputData *data, ReorderBufferTXN *txn) { uint8 flags = 0; pq_sendbyte(out, 'B'); /* BEGIN */ /* send the flags field its self */ pq_sendbyte(out, flags); /* fixed fields */ pq_sendint64(out, txn->final_lsn); pq_sendint64(out, txn->commit_time); pq_sendint(out, txn->xid, 4); } /* * Write COMMIT to the output stream. */ void pglogical_write_commit(StringInfo out, PGLogicalOutputData *data, ReorderBufferTXN *txn, XLogRecPtr commit_lsn) { uint8 flags = 0; pq_sendbyte(out, 'C'); /* sending COMMIT */ /* send the flags field */ pq_sendbyte(out, flags); /* send fixed fields */ pq_sendint64(out, commit_lsn); pq_sendint64(out, txn->end_lsn); pq_sendint64(out, txn->commit_time); } /* * Write ORIGIN to the output stream. */ void pglogical_write_origin(StringInfo out, const char *origin, XLogRecPtr origin_lsn) { uint8 flags = 0; uint8 len; Assert(strlen(origin) < 255); pq_sendbyte(out, 'O'); /* ORIGIN */ /* send the flags field its self */ pq_sendbyte(out, flags); /* fixed fields */ pq_sendint64(out, origin_lsn); /* origin */ len = strlen(origin) + 1; pq_sendbyte(out, len); pq_sendbytes(out, origin, len); } /* * Write INSERT to the output stream. 
*/ void pglogical_write_insert(StringInfo out, PGLogicalOutputData *data, Relation rel, HeapTuple newtuple, Bitmapset *att_list) { uint8 flags = 0; pq_sendbyte(out, 'I'); /* action INSERT */ /* send the flags field */ pq_sendbyte(out, flags); /* use Oid as relation identifier */ pq_sendint(out, RelationGetRelid(rel), 4); pq_sendbyte(out, 'N'); /* new tuple follows */ pglogical_write_tuple(out, data, rel, newtuple, att_list); } /* * Write UPDATE to the output stream. */ void pglogical_write_update(StringInfo out, PGLogicalOutputData *data, Relation rel, HeapTuple oldtuple, HeapTuple newtuple, Bitmapset *att_list) { uint8 flags = 0; pq_sendbyte(out, 'U'); /* action UPDATE */ /* send the flags field */ pq_sendbyte(out, flags); /* use Oid as relation identifier */ pq_sendint(out, RelationGetRelid(rel), 4); /* * TODO: support whole tuple (O tuple type) * * Right now we can only write the key-part since logical decoding * doesn't know how to record the whole old tuple for us in WAL. * We can't use REPLICA IDENTITY FULL for this, since that makes * the key-part the whole tuple, causing issues with conflict * resultion and index lookups. We need a separate decoding option * to record whole tuples. */ if (oldtuple != NULL) { pq_sendbyte(out, 'K'); /* old key follows */ pglogical_write_tuple(out, data, rel, oldtuple, att_list); } pq_sendbyte(out, 'N'); /* new tuple follows */ pglogical_write_tuple(out, data, rel, newtuple, att_list); } /* * Write DELETE to the output stream. 
*/ void pglogical_write_delete(StringInfo out, PGLogicalOutputData *data, Relation rel, HeapTuple oldtuple, Bitmapset *att_list) { uint8 flags = 0; pq_sendbyte(out, 'D'); /* action DELETE */ /* send the flags field */ pq_sendbyte(out, flags); /* use Oid as relation identifier */ pq_sendint(out, RelationGetRelid(rel), 4); /* * TODO support whole tuple ('O' tuple type) * * See notes on update for details */ pq_sendbyte(out, 'K'); /* old key follows */ pglogical_write_tuple(out, data, rel, oldtuple, att_list); } /* * Most of the brains for startup message creation lives in * pglogical_config.c, so this presently just sends the set of key/value pairs. */ void write_startup_message(StringInfo out, List *msg) { ListCell *lc; pq_sendbyte(out, 'S'); /* message type field */ pq_sendbyte(out, PGLOGICAL_STARTUP_MSG_FORMAT_FLAT); /* startup message version */ foreach (lc, msg) { DefElem *param = (DefElem*)lfirst(lc); Assert(IsA(param->arg, String) && strVal(param->arg) != NULL); /* null-terminated key and value pairs, in client_encoding */ pq_sendstring(out, param->defname); pq_sendstring(out, strVal(param->arg)); } } /* * Write a tuple to the outputstream, in the most efficient format possible. 
 */
static void
pglogical_write_tuple(StringInfo out, PGLogicalOutputData *data,
					  Relation rel, HeapTuple tuple, Bitmapset *att_list)
{
	TupleDesc	desc;
	Datum		values[MaxTupleAttributeNumber];
	bool		isnull[MaxTupleAttributeNumber];
	int			i;
	uint16		nliveatts = 0;

	desc = RelationGetDescr(rel);

	pq_sendbyte(out, 'T');		/* sending TUPLE */

	/*
	 * First pass: count attributes that will actually be sent, i.e. those
	 * that are not dropped and (when a column filter is in effect) are
	 * members of att_list.
	 */
	for (i = 0; i < desc->natts; i++)
	{
		Form_pg_attribute att = TupleDescAttr(desc,i);

		if (att->attisdropped)
			continue;

		if (att_list &&
			!bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
						   att_list))
			continue;

		nliveatts++;
	}
	pq_sendint(out, nliveatts, 2);

	/* try to allocate enough memory from the get go */
	enlargeStringInfo(out, tuple->t_len +
					  nliveatts * (1 + 4));

	/*
	 * XXX: should this prove to be a relevant bottleneck, it might be
	 * interesting to inline heap_deform_tuple() here, we don't actually need
	 * the information in the form we get from it.
	 */
	heap_deform_tuple(tuple, desc, values, isnull);

	/*
	 * Second pass: send each live column using the most compact transfer
	 * representation the downstream accepts: 'n' null, 'u' unchanged toast,
	 * 'i' internal binary, 'b' typsend binary, or 't' text output.
	 */
	for (i = 0; i < desc->natts; i++)
	{
		HeapTuple	typtup;
		Form_pg_type typclass;
		Form_pg_attribute att = TupleDescAttr(desc,i);
		char		transfer_type;

		/* skip dropped columns */
		if (att->attisdropped)
			continue;

		if (att_list &&
			!bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
						   att_list))
			continue;

		if (isnull[i])
		{
			pq_sendbyte(out, 'n');	/* null column */
			continue;
		}
		else if (att->attlen == -1 && VARATT_IS_EXTERNAL_ONDISK(values[i]))
		{
			pq_sendbyte(out, 'u');	/* unchanged toast column */
			continue;
		}

		typtup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(att->atttypid));
		if (!HeapTupleIsValid(typtup))
			elog(ERROR, "cache lookup failed for type %u", att->atttypid);
		typclass = (Form_pg_type) GETSTRUCT(typtup);

		transfer_type = decide_datum_transfer(att, typclass,
											  data->allow_internal_basetypes,
											  data->allow_binary_basetypes);
		switch (transfer_type)
		{
			case 'i':
				pq_sendbyte(out, 'i');	/* internal-format binary data follows */

				/* pass by value */
				if (att->attbyval)
				{
					pq_sendint(out, att->attlen, 4);	/* length */

					/* append raw datum bytes directly into the buffer */
					enlargeStringInfo(out, att->attlen);
					store_att_byval(out->data + out->len, values[i],
									att->attlen);
					out->len += att->attlen;
					out->data[out->len] = '\0';
				}
				/* fixed length non-varlena pass-by-reference type */
				else if (att->attlen > 0)
				{
					pq_sendint(out, att->attlen, 4);	/* length */
					appendBinaryStringInfo(out, DatumGetPointer(values[i]),
										   att->attlen);
				}
				/* varlena type */
				else if (att->attlen == -1)
				{
					char	   *data = DatumGetPointer(values[i]);

					/* send indirect datums inline */
					if (VARATT_IS_EXTERNAL_INDIRECT(values[i]))
					{
						struct varatt_indirect redirect;
						VARATT_EXTERNAL_GET_POINTER(redirect, data);
						data = (char *) redirect.pointer;
					}

					/* on-disk toast was already handled above */
					Assert(!VARATT_IS_EXTERNAL(data));

					pq_sendint(out, VARSIZE_ANY(data), 4);	/* length */
					appendBinaryStringInfo(out, data, VARSIZE_ANY(data));
				}
				else
					elog(ERROR, "unsupported tuple type");

				break;

			case 'b':
				{
					bytea	   *outputbytes;
					int			len;

					pq_sendbyte(out, 'b');	/* binary send/recv data follows */

					outputbytes = OidSendFunctionCall(typclass->typsend,
													  values[i]);

					len = VARSIZE(outputbytes) - VARHDRSZ;
					pq_sendint(out, len, 4);	/* length */
					pq_sendbytes(out, VARDATA(outputbytes), len);	/* data */
					pfree(outputbytes);
				}
				break;

			default:
				{
					char	   *outputstr;
					int			len;

					pq_sendbyte(out, 't');	/* 'text' data follows */

					outputstr = OidOutputFunctionCall(typclass->typoutput,
													  values[i]);
					/* length includes the terminating NUL */
					len = strlen(outputstr) + 1;
					pq_sendint(out, len, 4);	/* length */
					appendBinaryStringInfo(out, outputstr, len);	/* data */
					pfree(outputstr);
				}
		}

		ReleaseSysCache(typtup);
	}
}

/*
 * Make the executive decision about which protocol to use.
 */
static char
decide_datum_transfer(Form_pg_attribute att, Form_pg_type typclass,
					  bool allow_internal_basetypes,
					  bool allow_binary_basetypes)
{
	/*
	 * Use the binary protocol, if allowed, for builtin & plain datatypes.
	 */
	if (allow_internal_basetypes &&
		typclass->typtype == 'b' &&
		att->atttypid < FirstNormalObjectId &&
		typclass->typelem == InvalidOid)
	{
		return 'i';
	}
	/*
	 * Use send/recv, if allowed, if the type is plain or builtin.
	 *
	 * XXX: we can't use send/recv for array or composite types for now due to
	 * the embedded oids.
	 */
	else if (allow_binary_basetypes &&
			 OidIsValid(typclass->typreceive) &&
			 (att->atttypid < FirstNormalObjectId ||
			  typclass->typtype != 'c') &&
			 (att->atttypid < FirstNormalObjectId ||
			  typclass->typelem == InvalidOid))
	{
		return 'b';
	}

	/* fall back to the universally-supported text representation */
	return 't';
}

/*
 * Read functions.
 */

/*
 * Read transaction BEGIN from the stream.
 */
void
pglogical_read_begin(StringInfo in, XLogRecPtr *remote_lsn,
					 TimestampTz *committime, TransactionId *remote_xid)
{
	/* read flags */
	uint8	flags = pq_getmsgbyte(in);
	Assert(flags == 0);
	(void) flags; /* unused */

	/* read fields */
	*remote_lsn = pq_getmsgint64(in);
	Assert(*remote_lsn != InvalidXLogRecPtr);
	*committime = pq_getmsgint64(in);
	*remote_xid = pq_getmsgint(in, 4);
}

/*
 * Read transaction COMMIT from the stream.
 */
void
pglogical_read_commit(StringInfo in, XLogRecPtr *commit_lsn,
					  XLogRecPtr *end_lsn, TimestampTz *committime)
{
	/* read flags */
	uint8	flags = pq_getmsgbyte(in);
	Assert(flags == 0);
	(void) flags; /* unused */

	/* read fields */
	*commit_lsn = pq_getmsgint64(in);
	*end_lsn = pq_getmsgint64(in);
	*committime = pq_getmsgint64(in);
}

/*
 * Read ORIGIN from the output stream.
 */
char *
pglogical_read_origin(StringInfo in, XLogRecPtr *origin_lsn)
{
	uint8	flags;
	uint8	len;

	/* read the flags */
	flags = pq_getmsgbyte(in);
	Assert(flags == 0);
	(void) flags; /* unused */

	/* fixed fields */
	*origin_lsn = pq_getmsgint64(in);

	/* origin */
	len = pq_getmsgbyte(in);
	return pnstrdup(pq_getmsgbytes(in, len), len);
}

/*
 * Read INSERT from stream.
 *
 * Fills the new tuple.
 */
PGLogicalRelation *
pglogical_read_insert(StringInfo in, LOCKMODE lockmode,
					  PGLogicalTupleData *newtup)
{
	char		action;
	uint32		relid;
	uint8		flags;
	PGLogicalRelation *rel;

	/* read the flags */
	flags = pq_getmsgbyte(in);
	Assert(flags == 0);
	(void) flags; /* unused */

	/* read the relation id */
	relid = pq_getmsgint(in, 4);

	action = pq_getmsgbyte(in);
	if (action != 'N')
		elog(ERROR, "expected new tuple but got %d",
			 action);

	rel = pglogical_relation_open(relid, lockmode);

	pglogical_read_tuple(in, rel, newtup);

	return rel;
}

/*
 * Read UPDATE from stream.
 */
PGLogicalRelation *
pglogical_read_update(StringInfo in, LOCKMODE lockmode, bool *hasoldtup,
					  PGLogicalTupleData *oldtup, PGLogicalTupleData *newtup)
{
	char		action;
	Oid			relid;
	uint8		flags;
	PGLogicalRelation *rel;

	/* read the flags */
	flags = pq_getmsgbyte(in);
	Assert(flags == 0);
	(void) flags; /* unused */

	/* read the relation id */
	relid = pq_getmsgint(in, 4);

	/* read and verify action */
	action = pq_getmsgbyte(in);
	if (action != 'K' && action != 'O' && action != 'N')
		elog(ERROR, "expected action 'N', 'O' or 'K', got %c",
			 action);

	rel = pglogical_relation_open(relid, lockmode);

	/* check for old tuple ('K' = key only, 'O' = whole old tuple) */
	if (action == 'K' || action == 'O')
	{
		pglogical_read_tuple(in, rel, oldtup);
		*hasoldtup = true;
		action = pq_getmsgbyte(in);
	}
	else
		*hasoldtup = false;

	/* check for new tuple */
	if (action != 'N')
		elog(ERROR, "expected action 'N', got %c",
			 action);

	pglogical_read_tuple(in, rel, newtup);

	return rel;
}

/*
 * Read DELETE from stream.
 *
 * Fills the old tuple.
*/ PGLogicalRelation * pglogical_read_delete(StringInfo in, LOCKMODE lockmode, PGLogicalTupleData *oldtup) { char action; Oid relid; uint8 flags; PGLogicalRelation *rel; /* read the flags */ flags = pq_getmsgbyte(in); Assert(flags == 0); (void) flags; /* unused */ /* read the relation id */ relid = pq_getmsgint(in, 4); /* read and verify action */ action = pq_getmsgbyte(in); if (action != 'K' && action != 'O') elog(ERROR, "expected action 'O' or 'K' %c", action); rel = pglogical_relation_open(relid, lockmode); pglogical_read_tuple(in, rel, oldtup); return rel; } /* * Read tuple in remote format from stream. * * The returned tuple is converted to the local relation tuple format. */ static void pglogical_read_tuple(StringInfo in, PGLogicalRelation *rel, PGLogicalTupleData *tuple) { int i; int natts; char action; TupleDesc desc; action = pq_getmsgbyte(in); if (action != 'T') elog(ERROR, "expected TUPLE, got %c", action); memset(tuple->nulls, 1, sizeof(tuple->nulls)); memset(tuple->changed, 0, sizeof(tuple->changed)); natts = pq_getmsgint(in, 2); if (rel->natts != natts) elog(ERROR, "tuple natts mismatch between remote relation metadata cache (natts=%u) and remote tuple data (natts=%u)", rel->natts, natts); desc = RelationGetDescr(rel->rel); /* Read the data */ for (i = 0; i < natts; i++) { int attid = rel->attmap[i]; Form_pg_attribute att = TupleDescAttr(desc,attid); char kind = pq_getmsgbyte(in); const char *data; int len; switch (kind) { case 'n': /* null */ /* already marked as null */ tuple->values[attid] = 0xdeadbeef; tuple->changed[attid] = true; break; case 'u': /* unchanged column */ tuple->values[attid] = 0xfbadbeef; /* make bad usage more obvious */ break; case 'i': /* internal binary format */ tuple->nulls[attid] = false; tuple->changed[attid] = true; len = pq_getmsgint(in, 4); /* read length */ data = pq_getmsgbytes(in, len); /* and data */ if (att->attbyval) tuple->values[attid] = fetch_att(data, true, len); else tuple->values[attid] = 
PointerGetDatum(data); break; case 'b': /* binary send/recv format */ { Oid typreceive; Oid typioparam; StringInfoData buf; tuple->nulls[attid] = false; tuple->changed[attid] = true; len = pq_getmsgint(in, 4); /* read length */ getTypeBinaryInputInfo(att->atttypid, &typreceive, &typioparam); /* create StringInfo pointing into the bigger buffer */ initStringInfo(&buf); /* and data */ buf.data = (char *) pq_getmsgbytes(in, len); buf.len = len; tuple->values[attid] = OidReceiveFunctionCall( typreceive, &buf, typioparam, att->atttypmod); if (buf.len != buf.cursor) ereport(ERROR, (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION), errmsg("incorrect binary data format"))); break; } case 't': /* text format */ { Oid typinput; Oid typioparam; tuple->nulls[attid] = false; tuple->changed[attid] = true; len = pq_getmsgint(in, 4); /* read length */ getTypeInputInfo(att->atttypid, &typinput, &typioparam); /* and data */ data = (char *) pq_getmsgbytes(in, len); tuple->values[attid] = OidInputFunctionCall( typinput, (char *) data, typioparam, att->atttypmod); } break; default: elog(ERROR, "unknown data representation type '%c'", kind); } } } /* * Read schema.relation from stream and return as PGLogicalRelation opened in * lockmode. */ uint32 pglogical_read_rel(StringInfo in) { uint8 flags; uint32 relid; int len; char *schemaname; char *relname; int natts; char **attrnames; /* read the flags */ flags = pq_getmsgbyte(in); Assert(flags == 0); (void) flags; /* unused */ relid = pq_getmsgint(in, 4); /* Read relation from stream */ len = pq_getmsgbyte(in); schemaname = (char *) pq_getmsgbytes(in, len); len = pq_getmsgbyte(in); relname = (char *) pq_getmsgbytes(in, len); /* Get attribute description */ pglogical_read_attrs(in, &attrnames, &natts); pglogical_relation_cache_update(relid, schemaname, relname, natts, attrnames); return relid; } /* * Read relation attributes from the outputstream. * * TODO handle flags. 
*/ static void pglogical_read_attrs(StringInfo in, char ***attrnames, int *nattrnames) { int i; uint16 nattrs; char **attrs; char blocktype; blocktype = pq_getmsgbyte(in); if (blocktype != 'A') elog(ERROR, "expected ATTRS, got %c", blocktype); nattrs = pq_getmsgint(in, 2); attrs = palloc(nattrs * sizeof(char *)); /* read the attributes */ for (i = 0; i < nattrs; i++) { uint16 len; blocktype = pq_getmsgbyte(in); /* column definition follows */ if (blocktype != 'C') elog(ERROR, "expected COLUMN, got %c", blocktype); /* read flags (we ignore them so far) */ (void) pq_getmsgbyte(in); blocktype = pq_getmsgbyte(in); /* column name block follows */ if (blocktype != 'N') elog(ERROR, "expected NAME, got %c", blocktype); /* attribute name */ len = pq_getmsgint(in, 2); /* the string is NULL terminated */ attrs[i] = (char *) pq_getmsgbytes(in, len); } *attrnames = attrs; *nattrnames = nattrs; } pglogical-REL2_4_1/pglogical_proto_native.h000066400000000000000000000047341415142317000210750ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_proto_native.h * pglogical protocol, native implementation * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_proto_native.h * *------------------------------------------------------------------------- */ #ifndef PG_LOGICAL_PROTO_NATIVE_H #define PG_LOGICAL_PROTO_NATIVE_H #include "lib/stringinfo.h" #include "utils/timestamp.h" #include "pglogical_output_plugin.h" #include "pglogical_output_proto.h" #include "pglogical_relcache.h" typedef struct PGLogicalTupleData { Datum values[MaxTupleAttributeNumber]; bool nulls[MaxTupleAttributeNumber]; bool changed[MaxTupleAttributeNumber]; } PGLogicalTupleData; extern void pglogical_write_rel(StringInfo out, PGLogicalOutputData *data, Relation rel, Bitmapset *att_list); extern void pglogical_write_begin(StringInfo out, PGLogicalOutputData *data, ReorderBufferTXN *txn); extern void 
pglogical_write_commit(StringInfo out, PGLogicalOutputData *data, ReorderBufferTXN *txn, XLogRecPtr commit_lsn); extern void pglogical_write_origin(StringInfo out, const char *origin, XLogRecPtr origin_lsn); extern void pglogical_write_insert(StringInfo out, PGLogicalOutputData *data, Relation rel, HeapTuple newtuple, Bitmapset *att_list); extern void pglogical_write_update(StringInfo out, PGLogicalOutputData *data, Relation rel, HeapTuple oldtuple, HeapTuple newtuple, Bitmapset *att_list); extern void pglogical_write_delete(StringInfo out, PGLogicalOutputData *data, Relation rel, HeapTuple oldtuple, Bitmapset *att_list); extern void write_startup_message(StringInfo out, List *msg); extern void pglogical_read_begin(StringInfo in, XLogRecPtr *remote_lsn, TimestampTz *committime, TransactionId *remote_xid); extern void pglogical_read_commit(StringInfo in, XLogRecPtr *commit_lsn, XLogRecPtr *end_lsn, TimestampTz *committime); extern char *pglogical_read_origin(StringInfo in, XLogRecPtr *origin_lsn); extern uint32 pglogical_read_rel(StringInfo in); extern PGLogicalRelation *pglogical_read_insert(StringInfo in, LOCKMODE lockmode, PGLogicalTupleData *newtup); extern PGLogicalRelation *pglogical_read_update(StringInfo in, LOCKMODE lockmode, bool *hasoldtup, PGLogicalTupleData *oldtup, PGLogicalTupleData *newtup); extern PGLogicalRelation *pglogical_read_delete(StringInfo in, LOCKMODE lockmode, PGLogicalTupleData *oldtup); #endif /* PG_LOGICAL_PROTO_NATIVE_H */ pglogical-REL2_4_1/pglogical_queue.c000066400000000000000000000141201415142317000174710ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_queue.c * pglogical queue and connection catalog manipulation functions * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_queue.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "access/genam.h" #include 
"access/hash.h"
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/xact.h"
#include "catalog/dependency.h"
#include "catalog/indexing.h"
#include "catalog/namespace.h"
#include "catalog/objectaddress.h"
#include "catalog/pg_extension.h"
#include "catalog/pg_trigger.h"
#include "catalog/pg_type.h"
#include "commands/extension.h"
#include "commands/trigger.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "parser/parse_func.h"
#include "utils/array.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/json.h"
#include "utils/jsonb.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/timestamp.h"

#include "pglogical_queue.h"
#include "pglogical.h"

/* name of the queue table inside the extension schema */
#define CATALOG_QUEUE	"queue"

#define Natts_queue					5
#define Anum_queue_queued_at		1
#define Anum_queue_role				2
#define Anum_queue_replication_sets	3
#define Anum_queue_message_type		4
#define Anum_queue_message			5

/* C mirror of the leading fixed-width columns of the queue table */
typedef struct QueueTuple
{
	TimestampTz	queued_at;
	NameData	replication_set;
	NameData	role;
	char		message_type;
/*	json		message;*/
} QueueTuple;

/*
 * Add tuple to the queue table.
 */
void
queue_message(List *replication_sets, Oid roleoid, char message_type,
			  char *message)
{
	RangeVar   *rv;
	Relation	rel;
	TupleDesc	tupDesc;
	HeapTuple	tup;
	Datum		values[Natts_queue];
	bool		nulls[Natts_queue];
	const char *role;
	TimestampTz ts = GetCurrentTimestamp();

	/* 9.5+ takes a missing_ok argument; 9.4 does not */
	role = GetUserNameFromId(roleoid
#if PG_VERSION_NUM >= 90500
							 , false
#endif
							);

	rv = makeRangeVar(EXTENSION_NAME, CATALOG_QUEUE, -1);
	rel = table_openrv(rv, RowExclusiveLock);
	tupDesc = RelationGetDescr(rel);

	/* Form a tuple. */
	memset(nulls, false, sizeof(nulls));

	values[Anum_queue_queued_at - 1] = TimestampTzGetDatum(ts);
	values[Anum_queue_role - 1] =
		DirectFunctionCall1(namein, CStringGetDatum(role));
	if (replication_sets)
		values[Anum_queue_replication_sets - 1] =
			PointerGetDatum(strlist_to_textarray(replication_sets));
	else
		nulls[Anum_queue_replication_sets - 1] = true;
	values[Anum_queue_message_type - 1] = CharGetDatum(message_type);
	values[Anum_queue_message - 1] =
		DirectFunctionCall1(json_in, CStringGetDatum(message));

	tup = heap_form_tuple(tupDesc, values, nulls);

	/* Insert the tuple to the catalog. */
	CatalogTupleInsert(rel, tup);

	/* Cleanup. */
	heap_freetuple(tup);
	table_close(rel, NoLock);
}

/*
 * Parse the tuple from the queue table into palloc'd QueuedMessage struct.
 *
 * The caller must have the queue table locked in at least AccessShare mode.
 */
QueuedMessage *
queued_message_from_tuple(HeapTuple queue_tup)
{
	RangeVar   *rv;
	Relation	rel;
	TupleDesc	tupDesc;
	bool		isnull;
	Datum		d;
	QueuedMessage *res;

	/* Open relation to get the tuple descriptor. */
	rv = makeRangeVar(EXTENSION_NAME, CATALOG_QUEUE, -1);
	rel = table_openrv(rv, NoLock);
	tupDesc = RelationGetDescr(rel);

	res = (QueuedMessage *) palloc(sizeof(QueuedMessage));

	d = fastgetattr(queue_tup, Anum_queue_queued_at, tupDesc, &isnull);
	Assert(!isnull);
	res->queued_at = DatumGetTimestampTz(d);

	d = fastgetattr(queue_tup, Anum_queue_role, tupDesc, &isnull);
	Assert(!isnull);
	res->role = pstrdup(NameStr(*DatumGetName(d)));

	d = fastgetattr(queue_tup, Anum_queue_replication_sets, tupDesc, &isnull);
	if (!isnull)
		res->replication_sets = textarray_to_list(DatumGetArrayTypeP(d));
	else
		res->replication_sets = NULL;

	d = fastgetattr(queue_tup, Anum_queue_message_type, tupDesc, &isnull);
	Assert(!isnull);
	res->message_type = DatumGetChar(d);

	d = fastgetattr(queue_tup, Anum_queue_message, tupDesc, &isnull);
	Assert(!isnull);
	/* Parse the json inside the message into Jsonb object.
*/ res->message = DatumGetJsonb( DirectFunctionCall1(jsonb_in, DirectFunctionCall1(json_out, d))); /* Close the relation. */ table_close(rel, NoLock); return res; } /* * Get (cached) oid of the queue table. */ Oid get_queue_table_oid(void) { static Oid queuetableoid = InvalidOid; if (queuetableoid == InvalidOid) queuetableoid = get_pglogical_table_oid(CATALOG_QUEUE); return queuetableoid; } /* * Create a TRUNCATE trigger for a persistent table and mark * it tgisinternal so that it's not dumped by pg_dump. * * This is basically wrapper around CreateTrigger(). */ void create_truncate_trigger(Relation rel) { CreateTrigStmt *tgstmt; ObjectAddress trgobj; ObjectAddress extension; Oid fargtypes[1]; List *funcname = list_make2(makeString(EXTENSION_NAME), makeString("queue_truncate")); /* * Check for already existing trigger on the table to avoid adding * duplicate ones. */ if (rel->trigdesc) { Trigger *trigger = rel->trigdesc->triggers; int i; Oid funcoid = LookupFuncName(funcname, 0, fargtypes, false); for (i = 0; i < rel->trigdesc->numtriggers; i++) { if (!TRIGGER_FOR_TRUNCATE(trigger->tgtype)) continue; if (trigger->tgfoid == funcoid) return; trigger++; } } tgstmt = makeNode(CreateTrigStmt); tgstmt->trigname = "queue_truncate_trigger"; tgstmt->relation = NULL; tgstmt->funcname = funcname; tgstmt->args = NIL; tgstmt->row = false; tgstmt->timing = TRIGGER_TYPE_AFTER; tgstmt->events = TRIGGER_TYPE_TRUNCATE; tgstmt->columns = NIL; tgstmt->whenClause = NULL; tgstmt->isconstraint = false; tgstmt->deferrable = false; tgstmt->initdeferred = false; tgstmt->constrrel = NULL; trgobj = PGLCreateTrigger(tgstmt, NULL, RelationGetRelid(rel), InvalidOid, InvalidOid, InvalidOid, true /* tgisinternal */); extension.classId = ExtensionRelationId; extension.objectId = get_extension_oid(EXTENSION_NAME, false); extension.objectSubId = 0; recordDependencyOn(&trgobj, &extension, DEPENDENCY_AUTO); /* Make the new trigger visible within this session */ CommandCounterIncrement(); } 
pglogical-REL2_4_1/pglogical_queue.h000066400000000000000000000020301415142317000174730ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_node.h * pglogical node and connection catalog manipulation functions * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_node.h * *------------------------------------------------------------------------- */ #ifndef PGLOGICAL_QUEUE_H #define PGLOGICAL_QUEUE_H #include "utils/jsonb.h" #define QUEUE_COMMAND_TYPE_SQL 'Q' #define QUEUE_COMMAND_TYPE_TRUNCATE 'T' #define QUEUE_COMMAND_TYPE_TABLESYNC 'A' #define QUEUE_COMMAND_TYPE_SEQUENCE 'S' typedef struct QueuedMessage { TimestampTz queued_at; List *replication_sets; char *role; char message_type; Jsonb *message; } QueuedMessage; extern void queue_message(List *replication_sets, Oid roleoid, char message_type, char *message); extern QueuedMessage *queued_message_from_tuple(HeapTuple queue_tup); extern Oid get_queue_table_oid(void); extern void create_truncate_trigger(Relation rel); #endif /* PGLOGICAL_NODE_H */ pglogical-REL2_4_1/pglogical_relcache.c000066400000000000000000000156571415142317000201330ustar00rootroot00000000000000/* ------------------------------------------------------------------------- * * pglogical_relcache.c * Caching relation specific information * * Copyright (C) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_relcache.c * * ------------------------------------------------------------------------- */ #include "postgres.h" #include "access/heapam.h" #include "catalog/pg_trigger.h" #include "commands/trigger.h" #include "utils/builtins.h" #include "utils/catcache.h" #include "utils/hsearch.h" #include "utils/fmgroids.h" #include "utils/inval.h" #include "utils/rel.h" #include "pglogical.h" #include "pglogical_relcache.h" #define PGLOGICALRELATIONHASH_INITIAL_SIZE 128 static HTAB *PGLogicalRelationHash = NULL; static void 
pglogical_relcache_init(void); static int tupdesc_get_att_by_name(TupleDesc desc, const char *attname); static void relcache_free_entry(PGLogicalRelation *entry) { pfree(entry->nspname); pfree(entry->relname); if (entry->natts > 0) { int i; for (i = 0; i < entry->natts; i++) pfree(entry->attnames[i]); pfree(entry->attnames); } if (entry->attmap) pfree(entry->attmap); entry->natts = 0; entry->reloid = InvalidOid; entry->rel = NULL; } PGLogicalRelation * pglogical_relation_open(uint32 remoteid, LOCKMODE lockmode) { PGLogicalRelation *entry; bool found; if (PGLogicalRelationHash == NULL) pglogical_relcache_init(); /* Search for existing entry. */ entry = hash_search(PGLogicalRelationHash, (void *) &remoteid, HASH_FIND, &found); if (!found) elog(ERROR, "cache lookup failed for remote relation %u", remoteid); /* Need to update the local cache? */ if (!OidIsValid(entry->reloid)) { RangeVar *rv = makeNode(RangeVar); int i; TupleDesc desc; rv->schemaname = (char *) entry->nspname; rv->relname = (char *) entry->relname; entry->rel = table_openrv(rv, lockmode); desc = RelationGetDescr(entry->rel); for (i = 0; i < entry->natts; i++) entry->attmap[i] = tupdesc_get_att_by_name(desc, entry->attnames[i]); entry->reloid = RelationGetRelid(entry->rel); /* Cache trigger info. 
*/ entry->hasTriggers = false; if (entry->rel->trigdesc != NULL) { TriggerDesc *trigdesc = entry->rel->trigdesc; int i; for (i = 0; i < trigdesc->numtriggers; i++) { Trigger *trigger = &trigdesc->triggers[i]; /* We only fire replica triggers on rows */ if (!(trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN || trigger->tgenabled == TRIGGER_DISABLED) && TRIGGER_FOR_ROW(trigger->tgtype)) { entry->hasTriggers = true; break; } } } } else if (!entry->rel) entry->rel = table_open(entry->reloid, lockmode); return entry; } void pglogical_relation_cache_update(uint32 remoteid, char *schemaname, char *relname, int natts, char **attnames) { MemoryContext oldcontext; PGLogicalRelation *entry; bool found; int i; if (PGLogicalRelationHash == NULL) pglogical_relcache_init(); /* * HASH_ENTER returns the existing entry if present or creates a new one. */ entry = hash_search(PGLogicalRelationHash, (void *) &remoteid, HASH_ENTER, &found); if (found) relcache_free_entry(entry); /* Make cached copy of the data */ oldcontext = MemoryContextSwitchTo(CacheMemoryContext); entry->nspname = pstrdup(schemaname); entry->relname = pstrdup(relname); entry->natts = natts; entry->attnames = palloc(natts * sizeof(char *)); for (i = 0; i < natts; i++) entry->attnames[i] = pstrdup(attnames[i]); entry->attmap = palloc(natts * sizeof(int)); MemoryContextSwitchTo(oldcontext); /* XXX Should we validate the relation against local schema here? */ entry->reloid = InvalidOid; } void pglogical_relation_cache_updater(PGLogicalRemoteRel *remoterel) { MemoryContext oldcontext; PGLogicalRelation *entry; bool found; int i; if (PGLogicalRelationHash == NULL) pglogical_relcache_init(); /* * HASH_ENTER returns the existing entry if present or creates a new one. 
*/ entry = hash_search(PGLogicalRelationHash, (void *) &remoterel->relid, HASH_ENTER, &found); if (found) relcache_free_entry(entry); /* Make cached copy of the data */ oldcontext = MemoryContextSwitchTo(CacheMemoryContext); entry->nspname = pstrdup(remoterel->nspname); entry->relname = pstrdup(remoterel->relname); entry->natts = remoterel->natts; entry->attnames = palloc(remoterel->natts * sizeof(char *)); for (i = 0; i < remoterel->natts; i++) entry->attnames[i] = pstrdup(remoterel->attnames[i]); entry->attmap = palloc(remoterel->natts * sizeof(int)); MemoryContextSwitchTo(oldcontext); /* XXX Should we validate the relation against local schema here? */ entry->reloid = InvalidOid; } void pglogical_relation_close(PGLogicalRelation * rel, LOCKMODE lockmode) { table_close(rel->rel, lockmode); rel->rel = NULL; } static void pglogical_relcache_invalidate_callback(Datum arg, Oid reloid) { PGLogicalRelation *entry; /* Just to be sure. */ if (PGLogicalRelationHash == NULL) return; if (reloid != InvalidOid) { HASH_SEQ_STATUS status; hash_seq_init(&status, PGLogicalRelationHash); /* TODO, use inverse lookup hastable */ while ((entry = (PGLogicalRelation *) hash_seq_search(&status)) != NULL) { if (entry->reloid == reloid) entry->reloid = InvalidOid; } } else { /* invalidate all cache entries */ HASH_SEQ_STATUS status; hash_seq_init(&status, PGLogicalRelationHash); while ((entry = (PGLogicalRelation *) hash_seq_search(&status)) != NULL) entry->reloid = InvalidOid; } } static void pglogical_relcache_init(void) { HASHCTL ctl; int hashflags; /* Make sure we've initialized CacheMemoryContext. */ if (CacheMemoryContext == NULL) CreateCacheMemoryContext(); /* Initialize the hash table. */ MemSet(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(uint32); ctl.entrysize = sizeof(PGLogicalRelation); ctl.hcxt = CacheMemoryContext; hashflags = HASH_ELEM | HASH_CONTEXT; #if PG_VERSION_NUM < 90500 /* * Handle the old hash API in PostgreSQL 9.4. 
* Note, this assumes that Oid is uint32 which is the case for 9.4 anyway. * * See postgres commit: * * 4a14f13a0ab Improve hash_create's API for selecting simple-binary-key hash functions. */ ctl.hash = oid_hash; hashflags |= HASH_FUNCTION; #else hashflags |= HASH_BLOBS; #endif PGLogicalRelationHash = hash_create("pglogical relation cache", PGLOGICALRELATIONHASH_INITIAL_SIZE, &ctl, hashflags); /* Watch for invalidation events. */ CacheRegisterRelcacheCallback(pglogical_relcache_invalidate_callback, (Datum) 0); } /* * Find attribute index in TupleDesc struct by attribute name. */ static int tupdesc_get_att_by_name(TupleDesc desc, const char *attname) { int i; for (i = 0; i < desc->natts; i++) { Form_pg_attribute att = TupleDescAttr(desc,i); if (strcmp(NameStr(att->attname), attname) == 0) return i; } elog(ERROR, "unknown column name %s", attname); } pglogical-REL2_4_1/pglogical_relcache.h000066400000000000000000000030321415142317000201200ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_relcache.h * pglogical relation cache * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_relcache.h * *------------------------------------------------------------------------- */ #ifndef PGLOGICAL_RELCACHE_H #define PGLOGICAL_RELCACHE_H #include "storage/lock.h" typedef struct PGLogicalRemoteRel { uint32 relid; char *nspname; char *relname; int natts; char **attnames; /* Only returned by info function, not protocol. */ bool hasRowFilter; } PGLogicalRemoteRel; typedef struct PGLogicalRelation { /* Info coming from the remote side. */ uint32 remoteid; char *nspname; char *relname; int natts; char **attnames; /* Mapping to local relation, filled as needed. */ Oid reloid; Relation rel; int *attmap; /* Additional cache, only valid as long as relation mapping is. 
*/ bool hasTriggers; } PGLogicalRelation; extern void pglogical_relation_cache_update(uint32 remoteid, char *schemaname, char *relname, int natts, char **attnames); extern void pglogical_relation_cache_updater(PGLogicalRemoteRel *remoterel); extern PGLogicalRelation *pglogical_relation_open(uint32 remoteid, LOCKMODE lockmode); extern void pglogical_relation_close(PGLogicalRelation * rel, LOCKMODE lockmode); extern void pglogical_relation_invalidate_cb(Datum arg, Oid reloid); struct PGLogicalTupleData; #endif /* PGLOGICAL_RELCACHE_H */ pglogical-REL2_4_1/pglogical_repset.c000066400000000000000000001106051415142317000176540ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_repset.c * pglogical replication set manipulation functions * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_repset.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "funcapi.h" #include "miscadmin.h" #include "access/genam.h" #include "access/hash.h" #include "access/heapam.h" #include "access/htup_details.h" #include "access/sysattr.h" #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/objectaddress.h" #include "catalog/pg_type.h" #include "executor/spi.h" #include "nodes/makefuncs.h" #include "replication/reorderbuffer.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/catcache.h" #include "utils/fmgroids.h" #include "utils/inval.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "pglogical_dependency.h" #include "pglogical_node.h" #include "pglogical_queue.h" #include "pglogical_repset.h" #include "pglogical.h" #define CATALOG_REPSET "replication_set" #define CATALOG_REPSET_SEQ "replication_set_seq" #define CATALOG_REPSET_TABLE "replication_set_table" #define CATALOG_REPSET_RELATION 
"replication_set_relation" typedef struct RepSetTuple { Oid id; Oid nodeid; NameData name; bool replicate_insert; bool replicate_update; bool replicate_delete; bool replicate_truncate; } RepSetTuple; #define Natts_repset 7 #define Anum_repset_id 1 #define Anum_repset_nodeid 2 #define Anum_repset_name 3 #define Anum_repset_replicate_insert 4 #define Anum_repset_replicate_update 5 #define Anum_repset_replicate_delete 6 #define Anum_repset_replicate_truncate 7 typedef struct RepSetSeqTuple { Oid id; Oid seqoid; } RepSetSeqTuple; #define Natts_repset_seq 2 #define Anum_repset_seq_setid 1 #define Anum_repset_seq_seqoid 2 typedef struct RepSetTableTuple { Oid setid; Oid reloid; #if 0 /* Only for info here. */ text att_list[1]; text row_filter; #endif } RepSetTableTuple; #define Natts_repset_table 4 #define Anum_repset_table_setid 1 #define Anum_repset_table_reloid 2 #define Anum_repset_table_att_list 3 #define Anum_repset_table_row_filter 4 #define REPSETTABLEHASH_INITIAL_SIZE 128 static HTAB *RepSetTableHash = NULL; /* * Read the replication set. */ PGLogicalRepSet * get_replication_set(Oid setid) { PGLogicalRepSet *repset; RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[1]; Assert(IsTransactionState()); rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET, -1); rel = table_openrv(rv, RowExclusiveLock); /* Search for repset record. 
*/ ScanKeyInit(&key[0], Anum_repset_id, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(setid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); tuple = systable_getnext(scan); if (!HeapTupleIsValid(tuple)) elog(ERROR, "replication set %u not found", setid); repset = replication_set_from_tuple(tuple); systable_endscan(scan); table_close(rel, RowExclusiveLock); return repset; } /* * Find replication set by name */ PGLogicalRepSet * get_replication_set_by_name(Oid nodeid, const char *setname, bool missing_ok) { PGLogicalRepSet *repset; RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[2]; Assert(IsTransactionState()); rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET, -1); rel = table_openrv(rv, RowExclusiveLock); /* Search for repset record. */ ScanKeyInit(&key[0], Anum_repset_nodeid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(nodeid)); ScanKeyInit(&key[1], Anum_repset_name, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(setname)); scan = systable_beginscan(rel, 0, true, NULL, 2, key); tuple = systable_getnext(scan); if (!HeapTupleIsValid(tuple)) { if (missing_ok) { systable_endscan(scan); table_close(rel, RowExclusiveLock); return NULL; } elog(ERROR, "replication set %s not found", setname); } repset = replication_set_from_tuple(tuple); systable_endscan(scan); table_close(rel, RowExclusiveLock); return repset; } static void repset_relcache_invalidate_callback(Datum arg, Oid reloid) { PGLogicalTableRepInfo *entry; /* Just to be sure. 
*/ if (RepSetTableHash == NULL) return; if (reloid == InvalidOid) { HASH_SEQ_STATUS status; hash_seq_init(&status, RepSetTableHash); while ((entry = hash_seq_search(&status)) != NULL) { entry->isvalid = false; if (entry->att_list) pfree(entry->att_list); entry->att_list = NULL; if (list_length(entry->row_filter)) list_free_deep(entry->row_filter); entry->row_filter = NIL; } } else if ((entry = hash_search(RepSetTableHash, &reloid, HASH_FIND, NULL)) != NULL) { entry->isvalid = false; if (entry->att_list) pfree(entry->att_list); entry->att_list = NULL; if (list_length(entry->row_filter)) list_free_deep(entry->row_filter); entry->row_filter = NIL; } } static void repset_relcache_init(void) { HASHCTL ctl; int hashflags; /* Make sure we've initialized CacheMemoryContext. */ if (CacheMemoryContext == NULL) CreateCacheMemoryContext(); /* Initialize the hash table. */ MemSet(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(PGLogicalTableRepInfo); ctl.hcxt = CacheMemoryContext; hashflags = HASH_ELEM | HASH_CONTEXT; #if PG_VERSION_NUM < 90500 /* * Handle the old hash API in PostgreSQL 9.4. * * See postgres commit: * * 4a14f13a0ab Improve hash_create's API for selecting simple-binary-key hash functions. */ ctl.hash = oid_hash; hashflags |= HASH_FUNCTION; #else hashflags |= HASH_BLOBS; #endif RepSetTableHash = hash_create("pglogical repset table cache", REPSETTABLEHASH_INITIAL_SIZE, &ctl, hashflags); /* * Watch for invalidation events fired when the relcache changes. * * Note that no invalidations are fired when the replication sets are * created, destroyed, modified, or change membership since there's no * syscache management for user catalogs. We do our own invalidations for * those separately. 
*/ CacheRegisterRelcacheCallback(repset_relcache_invalidate_callback, (Datum) 0); } List * get_node_replication_sets(Oid nodeid) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[1]; List *replication_sets = NIL; Assert(IsTransactionState()); rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET, -1); rel = table_openrv(rv, RowExclusiveLock); ScanKeyInit(&key[0], Anum_repset_nodeid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(nodeid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); while (HeapTupleIsValid(tuple = systable_getnext(scan))) { RepSetTuple *t = (RepSetTuple *) GETSTRUCT(tuple); PGLogicalRepSet *repset = get_replication_set(t->id); replication_sets = lappend(replication_sets, repset); } systable_endscan(scan); table_close(rel, RowExclusiveLock); return replication_sets; } List * get_replication_sets(Oid nodeid, List *replication_set_names, bool missing_ok) { RangeVar *rv; Relation rel; ListCell *lc; ScanKeyData key[2]; List *replication_sets = NIL; Assert(IsTransactionState()); rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET, -1); rel = table_openrv(rv, RowExclusiveLock); /* Setup common part of key. */ ScanKeyInit(&key[0], Anum_repset_nodeid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(nodeid)); foreach(lc, replication_set_names) { char *setname = lfirst(lc); SysScanDesc scan; HeapTuple tuple; /* Search for repset record. */ ScanKeyInit(&key[1], Anum_repset_name, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(setname)); /* TODO: use index. 
*/ scan = systable_beginscan(rel, 0, true, NULL, 2, key); tuple = systable_getnext(scan); if (!HeapTupleIsValid(tuple)) { if (missing_ok) { systable_endscan(scan); continue; } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("replication set %s not found", setname))); } replication_sets = lappend(replication_sets, replication_set_from_tuple(tuple)); systable_endscan(scan); } table_close(rel, RowExclusiveLock); return replication_sets; } PGLogicalTableRepInfo * get_table_replication_info(Oid nodeid, Relation table, List *subs_replication_sets) { PGLogicalTableRepInfo *entry; bool found; RangeVar *rv; Oid reloid = RelationGetRelid(table); Oid repset_reloid; Relation repset_rel; ScanKeyData key[1]; SysScanDesc scan; HeapTuple tuple; TupleDesc table_desc, repset_rel_desc; if (RepSetTableHash == NULL) repset_relcache_init(); /* * HASH_ENTER returns the existing entry if present or creates a new one. * * It might seem that it's weird to use just reloid here for the cache key * when we are searching for nodeid + relation. But this function is only * used by the output plugin which means the nodeid is always the same as * only one node is connected to current process. */ entry = hash_search(RepSetTableHash, (void *) &reloid, HASH_ENTER, &found); if (found && entry->isvalid) return entry; /* Fill the entry */ entry->reloid = reloid; entry->replicate_insert = false; entry->replicate_update = false; entry->replicate_delete = false; entry->att_list = NULL; entry->row_filter = NIL; /* * Check for match between table's replication sets and the subscription * list of replication sets that was given as parameter. * * Note that tables can have no replication sets. This will be commonly * true for example for internal tables which are created during table * rewrites, so if we'll want to support replicating those, we'll have * to have special handling for them. 
*/ rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET_TABLE, -1); repset_reloid = RangeVarGetRelid(rv, RowExclusiveLock, true); /* Backwards compat with 1.1/1.2 where the relation name was different. */ if (!OidIsValid(repset_reloid)) { rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET_RELATION, -1); repset_reloid = RangeVarGetRelid(rv, RowExclusiveLock, true); if (!OidIsValid(repset_reloid)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("relation \"%s.%s\" does not exist", rv->schemaname, rv->relname))); } repset_rel = table_open(repset_reloid, NoLock); repset_rel_desc = RelationGetDescr(repset_rel); table_desc = RelationGetDescr(table); ScanKeyInit(&key[0], Anum_repset_table_reloid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(reloid)); /* TODO: use index */ scan = systable_beginscan(repset_rel, 0, true, NULL, 1, key); while (HeapTupleIsValid(tuple = systable_getnext(scan))) { RepSetTableTuple *t = (RepSetTableTuple *) GETSTRUCT(tuple); ListCell *lc; foreach (lc, subs_replication_sets) { PGLogicalRepSet *repset = lfirst(lc); bool isnull; Datum d; if (t->setid == repset->id) { /* Update the action filter. */ if (repset->replicate_insert) entry->replicate_insert = true; if (repset->replicate_update) entry->replicate_update = true; if (repset->replicate_delete) entry->replicate_delete = true; /* Update replicated column map. */ d = heap_getattr(tuple, Anum_repset_table_att_list, repset_rel_desc, &isnull); if (!isnull) { Datum *elems; int nelems, i; deconstruct_array(DatumGetArrayTypePCopy(d), TEXTOID, -1, false, 'i', &elems, NULL, &nelems); for (i = 0; i < nelems; i++) { const char *attname = TextDatumGetCString(elems[i]); int attnum = get_att_num_by_name(table_desc, attname); MemoryContext olctx = MemoryContextSwitchTo(CacheMemoryContext); entry->att_list = bms_add_member(entry->att_list, attnum - FirstLowInvalidHeapAttributeNumber); MemoryContextSwitchTo(olctx); } } /* Add row filter if any. 
*/ d = heap_getattr(tuple, Anum_repset_table_row_filter, repset_rel_desc, &isnull); if (!isnull) { MemoryContext olctx = MemoryContextSwitchTo(CacheMemoryContext); Node *row_filter = stringToNode(TextDatumGetCString(d)); entry->row_filter = lappend(entry->row_filter, row_filter); MemoryContextSwitchTo(olctx); } } } } systable_endscan(scan); table_close(repset_rel, RowExclusiveLock); entry->isvalid = true; return entry; } List * get_table_replication_sets(Oid nodeid, Oid reloid) { RangeVar *rv; Oid relid; Relation rel; ScanKeyData key[1]; SysScanDesc scan; HeapTuple tuple; List *replication_sets = NIL; Assert(IsTransactionState()); rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET_TABLE, -1); relid = RangeVarGetRelid(rv, RowExclusiveLock, true); /* Backwards compat with 1.1/1.2 where the relation name was different. */ if (!OidIsValid(relid)) { rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET_RELATION, -1); relid = RangeVarGetRelid(rv, RowExclusiveLock, true); if (!OidIsValid(relid)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("relation \"%s.%s\" does not exist", rv->schemaname, rv->relname))); } rel = table_open(relid, NoLock); ScanKeyInit(&key[0], Anum_repset_table_reloid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(reloid)); /* TODO: use index */ scan = systable_beginscan(rel, 0, true, NULL, 1, key); while (HeapTupleIsValid(tuple = systable_getnext(scan))) { RepSetSeqTuple *t = (RepSetSeqTuple *) GETSTRUCT(tuple); PGLogicalRepSet *repset = get_replication_set(t->id); if (repset->nodeid != nodeid) continue; replication_sets = lappend(replication_sets, repset); } systable_endscan(scan); table_close(rel, RowExclusiveLock); return replication_sets; } static bool sequence_has_replication_sets(Oid nodeid, Oid seqoid) { RangeVar *rv; Relation rel; ScanKeyData key[1]; SysScanDesc scan; HeapTuple tuple; bool res = false; Assert(IsTransactionState()); rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET_SEQ, -1); rel = table_openrv(rv, RowExclusiveLock); 
ScanKeyInit(&key[0], Anum_repset_seq_seqoid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(seqoid)); /* TODO: use index */ scan = systable_beginscan(rel, 0, true, NULL, 1, key); if (HeapTupleIsValid(tuple = systable_getnext(scan))) res = true; systable_endscan(scan); table_close(rel, RowExclusiveLock); return res; } List * get_seq_replication_sets(Oid nodeid, Oid seqoid) { RangeVar *rv; Relation rel; ScanKeyData key[1]; SysScanDesc scan; HeapTuple tuple; List *replication_sets = NIL; Assert(IsTransactionState()); rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET_SEQ, -1); rel = table_openrv(rv, RowExclusiveLock); ScanKeyInit(&key[0], Anum_repset_table_reloid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(seqoid)); /* TODO: use index */ scan = systable_beginscan(rel, 0, true, NULL, 1, key); while (HeapTupleIsValid(tuple = systable_getnext(scan))) { RepSetSeqTuple *t = (RepSetSeqTuple *) GETSTRUCT(tuple); PGLogicalRepSet *repset = get_replication_set(t->id); if (repset->nodeid != nodeid) continue; replication_sets = lappend(replication_sets, repset); } systable_endscan(scan); table_close(rel, RowExclusiveLock); return replication_sets; } /* * Add new tuple to the replication_sets catalog. */ void create_replication_set(PGLogicalRepSet *repset) { RangeVar *rv; Relation rel; TupleDesc tupDesc; HeapTuple tup; Datum values[Natts_repset]; bool nulls[Natts_repset]; NameData repset_name; if (strlen(repset->name) == 0) ereport(ERROR, (errcode(ERRCODE_INVALID_NAME), errmsg("replication set name cannot be empty"))); if (get_replication_set_by_name(repset->nodeid, repset->name, true) != NULL) elog(ERROR, "replication set %s already exists", repset->name); /* * Generate new id unless one was already specified. 
*/ if (repset->id == InvalidOid) { uint32 hashinput[2]; hashinput[0] = repset->nodeid; hashinput[1] = DatumGetUInt32(hash_any((const unsigned char *) repset->name, strlen(repset->name))); repset->id = DatumGetUInt32(hash_any((const unsigned char *) hashinput, (int) sizeof(hashinput))); } rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET, -1); rel = table_openrv(rv, RowExclusiveLock); tupDesc = RelationGetDescr(rel); /* Form a tuple. */ memset(nulls, false, sizeof(nulls)); values[Anum_repset_id - 1] = ObjectIdGetDatum(repset->id); values[Anum_repset_nodeid - 1] = ObjectIdGetDatum(repset->nodeid); namestrcpy(&repset_name, repset->name); values[Anum_repset_name - 1] = NameGetDatum(&repset_name); values[Anum_repset_replicate_insert - 1] = BoolGetDatum(repset->replicate_insert); values[Anum_repset_replicate_update - 1] = BoolGetDatum(repset->replicate_update); values[Anum_repset_replicate_delete - 1] = BoolGetDatum(repset->replicate_delete); values[Anum_repset_replicate_truncate - 1] = BoolGetDatum(repset->replicate_truncate); tup = heap_form_tuple(tupDesc, values, nulls); /* Insert the tuple to the catalog. */ CatalogTupleInsert(rel, tup); /* Cleanup. */ heap_freetuple(tup); table_close(rel, RowExclusiveLock); CommandCounterIncrement(); } /* * Alter the existing replication set. */ void alter_replication_set(PGLogicalRepSet *repset) { RangeVar *rv; SysScanDesc scan; ScanKeyData key[1]; Relation rel; TupleDesc tupDesc; HeapTuple oldtup, newtup; Datum values[Natts_repset]; bool nulls[Natts_repset]; bool replaces[Natts_repset]; rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET, -1); rel = table_openrv(rv, RowExclusiveLock); tupDesc = RelationGetDescr(rel); /* Search for repset record. 
*/ ScanKeyInit(&key[0], Anum_repset_id, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(repset->id)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); oldtup = systable_getnext(scan); if (!HeapTupleIsValid(oldtup)) elog(ERROR, "replication set %u not found", repset->id); /* * Validate that replication is not being changed to replicate UPDATEs * and DELETEs if it contains any tables without replication identity. */ if (repset->replicate_update || repset->replicate_delete) { RangeVar *tablesrv; Relation tablesrel; SysScanDesc tablesscan; HeapTuple tablestup; ScanKeyData tableskey[1]; tablesrv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET_TABLE, -1); tablesrel = table_openrv(tablesrv, RowExclusiveLock); /* Search for the record. */ ScanKeyInit(&tableskey[0], Anum_repset_table_setid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(repset->id)); tablesscan = systable_beginscan(tablesrel, 0, true, NULL, 1, tableskey); /* Process every individual table in the set. */ while (HeapTupleIsValid(tablestup = systable_getnext(tablesscan))) { RepSetTableTuple *t = (RepSetTableTuple *) GETSTRUCT(tablestup); Relation targetrel; targetrel = table_open(t->reloid, AccessShareLock); /* Check of relation has replication index. */ if (RelationGetForm(targetrel)->relkind == RELKIND_RELATION) { if (targetrel->rd_indexvalid == 0) RelationGetIndexList(targetrel); if (!OidIsValid(targetrel->rd_replidindex) && (repset->replicate_update || repset->replicate_delete)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("replication set %s cannot be altered to " "replicate UPDATEs or DELETEs because it " "contains tables without PRIMARY KEY", repset->name))); } table_close(targetrel, NoLock); } systable_endscan(tablesscan); table_close(tablesrel, RowExclusiveLock); } /* Everything ok, form a new tuple. 
*/ memset(nulls, false, sizeof(nulls)); memset(replaces, true, sizeof(replaces)); replaces[Anum_repset_id - 1] = false; replaces[Anum_repset_name - 1] = false; replaces[Anum_repset_nodeid - 1] = false; values[Anum_repset_replicate_insert - 1] = BoolGetDatum(repset->replicate_insert); values[Anum_repset_replicate_update - 1] = BoolGetDatum(repset->replicate_update); values[Anum_repset_replicate_delete - 1] = BoolGetDatum(repset->replicate_delete); values[Anum_repset_replicate_truncate - 1] = BoolGetDatum(repset->replicate_truncate); newtup = heap_modify_tuple(oldtup, tupDesc, values, nulls, replaces); /* Update the tuple in catalog. */ CatalogTupleUpdate(rel, &oldtup->t_self, newtup); /* Cleanup. */ heap_freetuple(newtup); systable_endscan(scan); table_close(rel, RowExclusiveLock); } /* * Remove all tables from replication set. */ static void replication_set_remove_tables(Oid setid, Oid nodeid) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[1]; ObjectAddress myself; rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET_TABLE, -1); rel = table_openrv(rv, RowExclusiveLock); myself.classId = get_replication_set_table_rel_oid(); myself.objectId = setid; /* Search for the record. */ ScanKeyInit(&key[0], Anum_repset_table_setid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(setid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); while (HeapTupleIsValid(tuple = systable_getnext(scan))) { RepSetTableTuple *t = (RepSetTableTuple *) GETSTRUCT(tuple); Oid reloid = t->reloid; /* Remove the tuple. */ simple_heap_delete(rel, &tuple->t_self); CacheInvalidateRelcacheByRelid(reloid); /* Dependency cleanup. */ myself.objectSubId = reloid; pglogical_tryDropDependencies(&myself, DROP_CASCADE); } /* Cleanup. */ systable_endscan(scan); table_close(rel, RowExclusiveLock); } /* * Remove all sequences from replication set. 
*/ static void replication_set_remove_seqs(Oid setid, Oid nodeid) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[1]; ObjectAddress myself; rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET_SEQ, -1); rel = table_openrv(rv, RowExclusiveLock); /* Search for the record. */ ScanKeyInit(&key[0], Anum_repset_table_setid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(setid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); myself.classId = get_replication_set_seq_rel_oid(); myself.objectId = setid; while (HeapTupleIsValid(tuple = systable_getnext(scan))) { RepSetSeqTuple *t = (RepSetSeqTuple *) GETSTRUCT(tuple); Oid seqoid = t->seqoid; /* Remove the tuple. */ simple_heap_delete(rel, &tuple->t_self); /* Make sure the sequence_has_replication_sets sees the changes. */ CommandCounterIncrement(); if (!sequence_has_replication_sets(nodeid, seqoid)) pglogical_drop_sequence_state_record(seqoid); CacheInvalidateRelcacheByRelid(seqoid); /* Dependency cleanup. */ myself.objectSubId = seqoid; pglogical_tryDropDependencies(&myself, DROP_CASCADE); } /* Cleanup. */ systable_endscan(scan); table_close(rel, RowExclusiveLock); } /* * Delete the tuple from replication sets catalog. */ void drop_replication_set(Oid setid) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[1]; RepSetTuple *repset; rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET, -1); rel = table_openrv(rv, RowExclusiveLock); /* Search for repset record. */ ScanKeyInit(&key[0], Anum_repset_id, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(setid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); tuple = systable_getnext(scan); if (!HeapTupleIsValid(tuple)) elog(ERROR, "replication set %u not found", setid); repset = (RepSetTuple *) GETSTRUCT(tuple); /* Remove all tables and sequences associated with the repset. */ replication_set_remove_tables(setid, repset->nodeid); replication_set_remove_seqs(setid, repset->nodeid); /* Remove the tuple. 
*/ simple_heap_delete(rel, &tuple->t_self); /* Cleanup. */ CacheInvalidateRelcache(rel); systable_endscan(scan); table_close(rel, RowExclusiveLock); CommandCounterIncrement(); } void drop_node_replication_sets(Oid nodeid) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[1]; Assert(IsTransactionState()); rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET, -1); rel = table_openrv(rv, RowExclusiveLock); ScanKeyInit(&key[0], Anum_repset_nodeid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(nodeid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); /* Remove matching tuples. */ while (HeapTupleIsValid(tuple = systable_getnext(scan))) { RepSetTuple *repset = (RepSetTuple *) GETSTRUCT(tuple); /* Remove all tables and sequences associated with the repset. */ replication_set_remove_tables(repset->id, repset->nodeid); replication_set_remove_seqs(repset->id, repset->nodeid); /* Remove the repset. */ simple_heap_delete(rel, &tuple->t_self); } /* Cleanup. */ CacheInvalidateRelcache(rel); systable_endscan(scan); table_close(rel, RowExclusiveLock); CommandCounterIncrement(); } /* * Insert new replication set / table mapping. */ void replication_set_add_table(Oid setid, Oid reloid, List *att_list, Node *row_filter) { RangeVar *rv; Relation rel; Relation targetrel; TupleDesc tupDesc; HeapTuple tup; Datum values[Natts_repset_table]; bool nulls[Natts_repset_table]; PGLogicalRepSet *repset = get_replication_set(setid); ObjectAddress referenced; ObjectAddress myself; /* Open the relation. */ targetrel = table_open(reloid, ShareRowExclusiveLock); /* UNLOGGED and TEMP relations cannot be part of replication set. 
*/ if (!RelationNeedsWAL(targetrel)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("UNLOGGED and TEMP tables cannot be replicated"))); if (targetrel->rd_indexvalid == 0) RelationGetIndexList(targetrel); if (!OidIsValid(targetrel->rd_replidindex) && (repset->replicate_update || repset->replicate_delete)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("table %s cannot be added to replication set %s", RelationGetRelationName(targetrel), repset->name), errdetail("table does not have PRIMARY KEY and given " "replication set is configured to replicate " "UPDATEs and/or DELETEs"), errhint("Add a PRIMARY KEY to the table"))); create_truncate_trigger(targetrel); table_close(targetrel, NoLock); /* Open the catalog. */ rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET_TABLE, -1); rel = table_openrv(rv, RowExclusiveLock); tupDesc = RelationGetDescr(rel); /* Form a tuple. */ memset(nulls, false, sizeof(nulls)); values[Anum_repset_table_setid - 1] = ObjectIdGetDatum(repset->id); values[Anum_repset_table_reloid - 1] = ObjectIdGetDatum(reloid); if (list_length(att_list)) values[Anum_repset_table_att_list - 1] = PointerGetDatum(strlist_to_textarray(att_list)); else nulls[Anum_repset_table_att_list - 1] = true; if (row_filter) values[Anum_repset_table_row_filter - 1] = CStringGetTextDatum(nodeToString(row_filter)); else nulls[Anum_repset_table_row_filter - 1] = true; tup = heap_form_tuple(tupDesc, values, nulls); /* Insert the tuple to the catalog. */ CatalogTupleInsert(rel, tup); /* Cleanup. */ CacheInvalidateRelcacheByRelid(reloid); heap_freetuple(tup); myself.classId = get_replication_set_table_rel_oid(); myself.objectId = setid; myself.objectSubId = reloid; referenced.classId = RelationRelationId; referenced.objectId = reloid; referenced.objectSubId = 0; pglogical_recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); /* Make sure we record dependencies for the row_filter as well. 
*/ if (row_filter) { pglogical_recordDependencyOnSingleRelExpr(&myself, row_filter, reloid, DEPENDENCY_NORMAL, DEPENDENCY_NORMAL); } table_close(rel, RowExclusiveLock); CommandCounterIncrement(); } /* * Insert new replication set / sequence mapping. */ void replication_set_add_seq(Oid setid, Oid seqoid) { RangeVar *rv; Relation rel; Relation targetrel; TupleDesc tupDesc; HeapTuple tup; Datum values[Natts_repset_table]; bool nulls[Natts_repset_table]; PGLogicalRepSet *repset = get_replication_set(setid); ObjectAddress referenced; ObjectAddress myself; /* Open the relation. */ targetrel = table_open(seqoid, ShareRowExclusiveLock); /* UNLOGGED and TEMP relations cannot be part of replication set. */ if (!RelationNeedsWAL(targetrel)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("UNLOGGED and TEMP sequences cannot be replicated"))); /* Ensure track the state of the sequence. */ pglogical_create_sequence_state_record(seqoid); table_close(targetrel, NoLock); /* Open the catalog. */ rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET_SEQ, -1); rel = table_openrv(rv, RowExclusiveLock); tupDesc = RelationGetDescr(rel); /* Form a tuple. */ memset(nulls, false, sizeof(nulls)); values[Anum_repset_seq_setid - 1] = ObjectIdGetDatum(repset->id); values[Anum_repset_seq_seqoid - 1] = ObjectIdGetDatum(seqoid); tup = heap_form_tuple(tupDesc, values, nulls); /* Insert the tuple to the catalog. */ CatalogTupleInsert(rel, tup); /* Cleanup. */ CacheInvalidateRelcacheByRelid(seqoid); heap_freetuple(tup); myself.classId = get_replication_set_seq_rel_oid(); myself.objectId = setid; myself.objectSubId = seqoid; referenced.classId = RelationRelationId; referenced.objectId = seqoid; referenced.objectSubId = 0; pglogical_recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); table_close(rel, RowExclusiveLock); CommandCounterIncrement(); } /* * Get list of table oids. 
*/ List * replication_set_get_tables(Oid setid) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[1]; List *res = NIL; rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET_TABLE, -1); rel = table_openrv(rv, RowExclusiveLock); /* Setup the search. */ ScanKeyInit(&key[0], Anum_repset_table_setid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(setid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); /* Build the list from the table. */ while (HeapTupleIsValid(tuple = systable_getnext(scan))) { RepSetTableTuple *t = (RepSetTableTuple *) GETSTRUCT(tuple); res = lappend_oid(res, t->reloid); } /* Cleanup. */ systable_endscan(scan); table_close(rel, RowExclusiveLock); return res; } /* * Get list of sequence oids. */ List * replication_set_get_seqs(Oid setid) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[1]; List *res = NIL; rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET_SEQ, -1); rel = table_openrv(rv, RowExclusiveLock); /* Setup the search. */ ScanKeyInit(&key[0], Anum_repset_seq_setid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(setid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); /* Build the list from the table. */ while (HeapTupleIsValid(tuple = systable_getnext(scan))) { RepSetSeqTuple *s = (RepSetSeqTuple *) GETSTRUCT(tuple); res = lappend_oid(res, s->seqoid); } /* Cleanup. */ systable_endscan(scan); table_close(rel, RowExclusiveLock); return res; } /* * Remove existing replication set / table mapping. */ void replication_set_remove_table(Oid setid, Oid reloid, bool from_drop) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[2]; ObjectAddress myself; rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET_TABLE, -1); rel = table_openrv(rv, RowExclusiveLock); /* Search for the record. 
*/ ScanKeyInit(&key[0], Anum_repset_table_setid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(setid)); ScanKeyInit(&key[1], Anum_repset_table_reloid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(reloid)); scan = systable_beginscan(rel, 0, true, NULL, 2, key); tuple = systable_getnext(scan); /* * Remove the tuple if found, if not found report error unless this * function was called as result of table drop. */ if (HeapTupleIsValid(tuple)) simple_heap_delete(rel, &tuple->t_self); else if (!from_drop) elog(ERROR, "replication set table mapping %u:%u not found", setid, reloid); /* We can only invalidate the relcache when relation still exists. */ if (!from_drop) CacheInvalidateRelcacheByRelid(reloid); /* Dependency cleanup. */ myself.classId = get_replication_set_table_rel_oid(); myself.objectId = setid; myself.objectSubId = reloid; pglogical_tryDropDependencies(&myself, DROP_CASCADE); /* Make sure the has_relation_replication_sets sees the changes. */ CommandCounterIncrement(); /* Cleanup. */ systable_endscan(scan); table_close(rel, RowExclusiveLock); } /* * Remove existing replication set / sequence mapping. */ void replication_set_remove_seq(Oid setid, Oid seqoid, bool from_drop) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[2]; ObjectAddress myself; PGLogicalRepSet *repset = get_replication_set(setid); rv = makeRangeVar(EXTENSION_NAME, CATALOG_REPSET_SEQ, -1); rel = table_openrv(rv, RowExclusiveLock); /* Search for the record. */ ScanKeyInit(&key[0], Anum_repset_seq_setid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(setid)); ScanKeyInit(&key[1], Anum_repset_seq_seqoid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(seqoid)); scan = systable_beginscan(rel, 0, true, NULL, 2, key); tuple = systable_getnext(scan); /* * Remove the tuple if found, if not found report error uless this function * was called as result of table drop. 
*/ if (HeapTupleIsValid(tuple)) simple_heap_delete(rel, &tuple->t_self); else if (!from_drop) elog(ERROR, "replication set sequence mapping %u:%u not found", setid, seqoid); /* We can only invalidate the relcache when relation still exists. */ if (!from_drop) CacheInvalidateRelcacheByRelid(seqoid); /* Dependency cleanup. */ myself.classId = get_replication_set_seq_rel_oid(); myself.objectId = setid; myself.objectSubId = seqoid; pglogical_tryDropDependencies(&myself, DROP_CASCADE); /* Make sure the has_relation_replication_sets sees the changes. */ CommandCounterIncrement(); if (from_drop || !sequence_has_replication_sets(repset->nodeid, seqoid)) pglogical_drop_sequence_state_record(seqoid); /* Cleanup. */ systable_endscan(scan); table_close(rel, RowExclusiveLock); } /* * Utility functions for working with PGLogicalRepSet struct. */ PGLogicalRepSet* replication_set_from_tuple(HeapTuple tuple) { RepSetTuple *repsettup = (RepSetTuple *) GETSTRUCT(tuple); PGLogicalRepSet *repset = (PGLogicalRepSet *) palloc(sizeof(PGLogicalRepSet)); repset->id = repsettup->id; repset->nodeid = repsettup->nodeid; repset->name = pstrdup(NameStr(repsettup->name)); repset->replicate_insert = repsettup->replicate_insert; repset->replicate_update = repsettup->replicate_update; repset->replicate_delete = repsettup->replicate_delete; repset->replicate_truncate = repsettup->replicate_truncate; return repset; } /* * Get (cached) oid of the replication set table. */ Oid get_replication_set_rel_oid(void) { static Oid repsetreloid = InvalidOid; if (repsetreloid == InvalidOid) repsetreloid = get_pglogical_table_oid(CATALOG_REPSET); return repsetreloid; } /* * Get (cached) oid of the replication set table mapping table. 
*/ Oid get_replication_set_table_rel_oid(void) { static Oid repsettablereloid = InvalidOid; if (repsettablereloid == InvalidOid) repsettablereloid = get_pglogical_table_oid(CATALOG_REPSET_TABLE); return repsettablereloid; } /* * Get (cached) oid of the replication set sequence mapping table. */ Oid get_replication_set_seq_rel_oid(void) { static Oid repsetseqreloid = InvalidOid; if (repsetseqreloid == InvalidOid) repsetseqreloid = get_pglogical_table_oid(CATALOG_REPSET_SEQ); return repsetseqreloid; } /* * Given a List of strings, return it as single comma separated * string, quoting identifiers as needed. * * This is essentially the reverse of SplitIdentifierString. * * The caller should free the result. */ char * stringlist_to_identifierstr(List *strings) { ListCell *lc; StringInfoData res; bool first = true; initStringInfo(&res); foreach (lc, strings) { if (first) first = false; else appendStringInfoChar(&res, ','); appendStringInfoString(&res, quote_identifier((char *)lfirst(lc))); } return res.data; } int get_att_num_by_name(TupleDesc desc, const char *attname) { int i; for (i = 0; i < desc->natts; i++) { if (TupleDescAttr(desc,i)->attisdropped) continue; if (namestrcmp(&(TupleDescAttr(desc,i)->attname), attname) == 0) return TupleDescAttr(desc,i)->attnum; } return FirstLowInvalidHeapAttributeNumber; } pglogical-REL2_4_1/pglogical_repset.h000066400000000000000000000057641415142317000176720ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_repset.h * pglogical replication set manipulation functions * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_repset.h * *------------------------------------------------------------------------- */ #ifndef PGLOGICAL_REPSET_H #define PGLOGICAL_REPSET_H #include "replication/reorderbuffer.h" typedef struct PGLogicalRepSet { Oid id; Oid nodeid; char *name; bool replicate_insert; bool replicate_update; bool 
replicate_delete; bool replicate_truncate; } PGLogicalRepSet; #define DEFAULT_REPSET_NAME "default" #define DEFAULT_INSONLY_REPSET_NAME "default_insert_only" #define DDL_SQL_REPSET_NAME "ddl_sql" /* This is only valid within one output plugin instance/walsender. */ typedef struct PGLogicalTableRepInfo { Oid reloid; /* key */ bool isvalid; /* is this entry valid? */ bool replicate_insert; /* should insert be replicated? */ bool replicate_update; /* should update be replicated? */ bool replicate_delete; /* should delete be replicated? */ Bitmapset *att_list; /* column filter NULL if everything is replicated otherwise each replicated column is a member */ List *row_filter; /* compiled row_filter nodes */ } PGLogicalTableRepInfo; extern PGLogicalRepSet *get_replication_set(Oid setid); extern PGLogicalRepSet *get_replication_set_by_name(Oid nodeid, const char *setname, bool missing_ok); extern List *get_node_replication_sets(Oid nodeid); extern List *get_replication_sets(Oid nodeid, List *replication_set_names, bool missing_ok); extern PGLogicalTableRepInfo *get_table_replication_info(Oid nodeid, Relation table, List *subs_replication_sets); extern void create_replication_set(PGLogicalRepSet *repset); extern void alter_replication_set(PGLogicalRepSet *repset); extern void drop_replication_set(Oid setid); extern void drop_node_replication_sets(Oid nodeid); extern void replication_set_add_table(Oid setid, Oid reloid, List *att_list, Node *row_filter); extern void replication_set_add_seq(Oid setid, Oid seqoid); extern List *replication_set_get_tables(Oid setid); extern List *replication_set_get_seqs(Oid setid); extern PGDLLEXPORT void replication_set_remove_table(Oid setid, Oid reloid, bool from_drop); extern PGDLLEXPORT void replication_set_remove_seq(Oid setid, Oid reloid, bool from_drop); extern List *get_table_replication_sets(Oid nodeid, Oid reloid); extern List *get_seq_replication_sets(Oid nodeid, Oid seqoid); extern PGLogicalRepSet 
*replication_set_from_tuple(HeapTuple tuple); extern Oid get_replication_set_rel_oid(void); extern Oid get_replication_set_table_rel_oid(void); extern Oid get_replication_set_seq_rel_oid(void); extern char *stringlist_to_identifierstr(List *repsets); extern int get_att_num_by_name(TupleDesc desc, const char *attname); #endif /* PGLOGICAL_REPSET_H */ pglogical-REL2_4_1/pglogical_rpc.c000066400000000000000000000237101415142317000171360ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_rpc.c * Remote calls * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_rpc.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "lib/stringinfo.h" #include "nodes/makefuncs.h" #include "catalog/pg_type.h" #include "storage/lock.h" #include "utils/rel.h" #include "pglogical_relcache.h" #include "pglogical_repset.h" #include "pglogical_rpc.h" #include "pglogical.h" #define atooid(x) ((Oid) strtoul((x), NULL, 10)) /* * Fetch list of tables that are grouped in specified replication sets. 
*/ List * pg_logical_get_remote_repset_tables(PGconn *conn, List *replication_sets) { PGresult *res; int i; List *tables = NIL; ListCell *lc; bool first = true; StringInfoData query; StringInfoData repsetarr; initStringInfo(&repsetarr); foreach (lc, replication_sets) { char *repset_name = lfirst(lc); if (first) first = false; else appendStringInfoChar(&repsetarr, ','); appendStringInfo(&repsetarr, "%s", PQescapeLiteral(conn, repset_name, strlen(repset_name))); } initStringInfo(&query); if (pglogical_remote_function_exists(conn, "pglogical", "show_repset_table_info", 2, NULL)) { /* PGLogical 2.0+ */ appendStringInfo(&query, "SELECT i.relid, i.nspname, i.relname, i.att_list," " i.has_row_filter" " FROM (SELECT DISTINCT relid FROM pglogical.tables WHERE set_name = ANY(ARRAY[%s])) t," " LATERAL pglogical.show_repset_table_info(t.relid, ARRAY[%s]) i", repsetarr.data, repsetarr.data); } else { /* PGLogical 1.x */ appendStringInfo(&query, "SELECT r.oid AS relid, t.nspname, t.relname, ARRAY(SELECT attname FROM pg_attribute WHERE attrelid = r.oid AND NOT attisdropped AND attnum > 0) AS att_list," " false AS has_row_filter" " FROM pglogical.tables t, pg_catalog.pg_class r, pg_catalog.pg_namespace n" " WHERE t.set_name = ANY(ARRAY[%s]) AND r.relname = t.relname AND n.oid = r.relnamespace AND n.nspname = t.nspname", repsetarr.data); } res = PQexec(conn, query.data); /* TODO: better error message? 
*/ if (PQresultStatus(res) != PGRES_TUPLES_OK) elog(ERROR, "could not get table list: %s", PQresultErrorMessage(res)); for (i = 0; i < PQntuples(res); i++) { PGLogicalRemoteRel *remoterel = palloc0(sizeof(PGLogicalRemoteRel)); remoterel->relid = atooid(PQgetvalue(res, i, 0)); remoterel->nspname = pstrdup(PQgetvalue(res, i, 1)); remoterel->relname = pstrdup(PQgetvalue(res, i, 2)); if (!parsePGArray(PQgetvalue(res, i, 3), &remoterel->attnames, &remoterel->natts)) elog(ERROR, "could not parse column list for table"); remoterel->hasRowFilter = (strcmp(PQgetvalue(res, i, 4), "t") == 0); tables = lappend(tables, remoterel); } PQclear(res); return tables; } /* * Like above but for one table. */ PGLogicalRemoteRel * pg_logical_get_remote_repset_table(PGconn *conn, RangeVar *rv, List *replication_sets) { PGLogicalRemoteRel *remoterel = palloc0(sizeof(PGLogicalRemoteRel)); PGresult *res; ListCell *lc; bool first = true; StringInfoData query; StringInfoData repsetarr; StringInfoData relname; initStringInfo(&relname); appendStringInfo(&relname, "%s.%s", PQescapeIdentifier(conn, rv->schemaname, strlen(rv->schemaname)), PQescapeIdentifier(conn, rv->relname, strlen(rv->relname))); initStringInfo(&repsetarr); foreach (lc, replication_sets) { char *repset_name = lfirst(lc); if (first) first = false; else appendStringInfoChar(&repsetarr, ','); appendStringInfo(&repsetarr, "%s", PQescapeLiteral(conn, repset_name, strlen(repset_name))); } initStringInfo(&query); if (pglogical_remote_function_exists(conn, "pglogical", "show_repset_table_info", 2, NULL)) { /* PGLogical 2.0+ */ appendStringInfo(&query, "SELECT i.relid, i.nspname, i.relname, i.att_list," " i.has_row_filter" " FROM pglogical.show_repset_table_info(%s::regclass, ARRAY[%s]) i", PQescapeLiteral(conn, relname.data, relname.len), repsetarr.data); } else { /* PGLogical 1.x */ appendStringInfo(&query, "SELECT r.oid AS relid, t.nspname, t.relname, ARRAY(SELECT attname FROM pg_attribute WHERE attrelid = r.oid AND NOT attisdropped 
AND attnum > 0) AS att_list," " false AS has_row_filter" " FROM pglogical.tables t, pg_catalog.pg_class r, pg_catalog.pg_namespace n" " WHERE r.oid = %s::regclass AND t.set_name = ANY(ARRAY[%s]) AND r.relname = t.relname AND n.oid = r.relnamespace AND n.nspname = t.nspname", PQescapeLiteral(conn, relname.data, relname.len), repsetarr.data); } res = PQexec(conn, query.data); /* TODO: better error message? */ if (PQresultStatus(res) != PGRES_TUPLES_OK || PQntuples(res) != 1) elog(ERROR, "could not get table list: %s", PQresultErrorMessage(res)); remoterel->relid = atooid(PQgetvalue(res, 0, 0)); remoterel->nspname = pstrdup(PQgetvalue(res, 0, 1)); remoterel->relname = pstrdup(PQgetvalue(res, 0, 2)); if (!parsePGArray(PQgetvalue(res, 0, 3), &remoterel->attnames, &remoterel->natts)) elog(ERROR, "could not parse column list for table"); remoterel->hasRowFilter = (strcmp(PQgetvalue(res, 0, 4), "t") == 0); PQclear(res); return remoterel; } /* * Is the remote slot active?. */ bool pglogical_remote_slot_active(PGconn *conn, const char *slot_name) { PGresult *res; const char *values[1]; Oid types[1] = { TEXTOID }; bool ret; values[0] = slot_name; res = PQexecParams(conn, "SELECT plugin, active " "FROM pg_catalog.pg_replication_slots " "WHERE slot_name = $1", 1, types, values, NULL, NULL, 0); if (PQresultStatus(res) != PGRES_TUPLES_OK) { ereport(ERROR, (errmsg("getting remote slot info failed"), errdetail("SELECT FROM pg_catalog.pg_replication_slots failed with: %s", PQerrorMessage(conn)))); } /* Slot not found return false */ if (PQntuples(res) == 0) { PQclear(res); return false; } /* Slot found, validate that it's pglogical slot */ if (PQgetisnull(res, 0, 0)) elog(ERROR, "Unexpectedly null field %s", PQfname(res, 0)); if (strcmp("pglogical_output", PQgetvalue(res, 0, 0)) != 0 && strcmp("pglogical", PQgetvalue(res, 0, 0)) != 0) ereport(ERROR, (errmsg("slot %s is not pglogical slot", slot_name))); ret = (strcmp(PQgetvalue(res, 0, 1), "t") == 0); PQclear(res); return ret; } /* 
* Drops replication slot on remote node that has been used by the local node. */ void pglogical_drop_remote_slot(PGconn *conn, const char *slot_name) { PGresult *res; const char *values[1]; Oid types[1] = { TEXTOID }; values[0] = slot_name; /* Check if the slot exists */ res = PQexecParams(conn, "SELECT plugin " "FROM pg_catalog.pg_replication_slots " "WHERE slot_name = $1", 1, types, values, NULL, NULL, 0); if (PQresultStatus(res) != PGRES_TUPLES_OK) { ereport(ERROR, (errmsg("getting remote slot info failed"), errdetail("SELECT FROM pg_catalog.pg_replication_slots failed with: %s", PQerrorMessage(conn)))); } /* Slot not found return false */ if (PQntuples(res) == 0) { PQclear(res); return; } /* Slot found, validate that it's BDR slot */ if (PQgetisnull(res, 0, 0)) elog(ERROR, "Unexpectedly null field %s", PQfname(res, 0)); if (strcmp("pglogical_output", PQgetvalue(res, 0, 0)) != 0 && strcmp("pglogical", PQgetvalue(res, 0, 0)) != 0) ereport(ERROR, (errmsg("slot %s is not pglogical slot", slot_name))); PQclear(res); res = PQexecParams(conn, "SELECT pg_drop_replication_slot($1)", 1, types, values, NULL, NULL, 0); /* And finally, drop the slot. */ if (PQresultStatus(res) != PGRES_TUPLES_OK) { ereport(ERROR, (errmsg("remote slot drop failed"), errdetail("SELECT pg_drop_replication_slot() failed with: %s", PQerrorMessage(conn)))); } PQclear(res); } void pglogical_remote_node_info(PGconn *conn, Oid *nodeid, char **node_name, char **sysid, char **dbname, char **replication_sets) { PGresult *res; res = PQexec(conn, "SELECT node_id, node_name, sysid, dbname, replication_sets FROM pglogical.pglogical_node_info()"); if (PQresultStatus(res) != PGRES_TUPLES_OK) elog(ERROR, "could not fetch remote node info: %s\n", PQerrorMessage(conn)); /* No nodes found? */ if (PQntuples(res) == 0) elog(ERROR, "the remote database is not configured as a pglogical node.\n"); if (PQntuples(res) > 1) elog(ERROR, "the remote database has multiple nodes configured. 
That is not supported with current version of pglogical.\n"); *nodeid = atooid(PQgetvalue(res, 0, 0)); *node_name = pstrdup(PQgetvalue(res, 0, 1)); if (sysid) *sysid = pstrdup(PQgetvalue(res, 0, 2)); if (dbname) *dbname = pstrdup(PQgetvalue(res, 0, 3)); if (replication_sets) *replication_sets = pstrdup(PQgetvalue(res, 0, 4)); PQclear(res); } bool pglogical_remote_function_exists(PGconn *conn, const char *nspname, const char *proname, int nargs, char *argname) { PGresult *res; const char *values[2]; Oid types[2] = { TEXTOID, TEXTOID }; bool ret; StringInfoData query; values[0] = proname; values[1] = nspname; initStringInfo(&query); appendStringInfo(&query, "SELECT oid " " FROM pg_catalog.pg_proc " " WHERE proname = $1 " " AND pronamespace = " " (SELECT oid " " FROM pg_catalog.pg_namespace " " WHERE nspname = $2)"); if (nargs >= 0) appendStringInfo(&query, " AND pronargs = '%d'", nargs); if (argname != NULL) appendStringInfo(&query, " AND %s = ANY (proargnames)", PQescapeLiteral(conn, argname, strlen(argname))); res = PQexecParams(conn, query.data, 2, types, values, NULL, NULL, 0); if (PQresultStatus(res) != PGRES_TUPLES_OK) elog(ERROR, "could not fetch remote function info: %s\n", PQerrorMessage(conn)); ret = PQntuples(res) > 0; PQclear(res); return ret; } pglogical-REL2_4_1/pglogical_rpc.h000066400000000000000000000020701415142317000171370ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_rpc.h * Remote calls * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_rpc.h * *------------------------------------------------------------------------- */ #ifndef PGLOGICAL_RPC_H #define PGLOGICAL_RPC_H #include "libpq-fe.h" extern List *pg_logical_get_remote_repset_tables(PGconn *conn, List *replication_sets); extern PGLogicalRemoteRel *pg_logical_get_remote_repset_table(PGconn *conn, RangeVar *rv, List *replication_sets); extern bool pglogical_remote_slot_active(PGconn 
*conn, const char *slot_name); extern void pglogical_drop_remote_slot(PGconn *conn, const char *slot_name); extern void pglogical_remote_node_info(PGconn *conn, Oid *nodeid, char **node_name, char **sysid, char **dbname, char **replication_sets); extern bool pglogical_remote_function_exists(PGconn *conn, const char *nspname, const char *proname, int nargs, char *argname); #endif /* PGLOGICAL_RPC_H */ pglogical-REL2_4_1/pglogical_sequences.c000066400000000000000000000212401415142317000203410ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_manager.c * pglogical worker for managing apply workers in a database * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_manager.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "access/genam.h" #include "access/heapam.h" #include "access/htup_details.h" #include "access/xact.h" #include "catalog/indexing.h" #include "commands/sequence.h" #include "nodes/makefuncs.h" #include "replication/reorderbuffer.h" #include "utils/fmgroids.h" #include "utils/json.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "pglogical.h" #include "pglogical_queue.h" #include "pglogical_repset.h" #define CATALOG_SEQUENCE_STATE "sequence_state" #define SEQUENCE_REPLICATION_MIN_CACHE 1000 #define SEQUENCE_REPLICATION_MAX_CACHE 1000000 typedef struct SeqStateTuple { Oid seqoid; int32 cache_size; int64 last_value; } SeqStateTuple; #define Natts_sequence_state 3 #define Anum_sequence_state_seqoid 1 #define Anum_sequence_state_cache_size 2 #define Anum_sequence_state_last_value 3 /* Get last value of individual sequence. 
*/ int64 sequence_get_last_value(Oid seqoid) { Relation seqrel; SysScanDesc scan; HeapTuple tup; int64 last_value; Form_pg_sequence seq; seqrel = table_open(seqoid, AccessShareLock); scan = systable_beginscan(seqrel, 0, false, NULL, 0, NULL); tup = systable_getnext(scan); Assert(HeapTupleIsValid(tup)); seq = (Form_pg_sequence) GETSTRUCT(tup); last_value = seq->last_value; systable_endscan(scan); table_close(seqrel, AccessShareLock); return last_value; } /* * Process sequence updates. */ bool synchronize_sequences(void) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; PGLogicalLocalNode *local_node; bool ret = true; StartTransactionCommand(); local_node = get_local_node(false, true); if (!local_node) { AbortCurrentTransaction(); return ret; } rv = makeRangeVar(EXTENSION_NAME, CATALOG_SEQUENCE_STATE, -1); rel = table_openrv(rv, RowExclusiveLock); scan = systable_beginscan(rel, 0, true, NULL, 0, NULL); while (HeapTupleIsValid(tuple = systable_getnext(scan))) { SeqStateTuple *oldseq = (SeqStateTuple *) GETSTRUCT(tuple); SeqStateTuple *newseq; int64 last_value; HeapTuple newtup; List *repsets; List *repset_names; ListCell *lc; char *nspname; char *relname; StringInfoData json; CHECK_FOR_INTERRUPTS(); last_value = sequence_get_last_value(oldseq->seqoid); /* Not enough of the sequence was consumed yet for us to care. */ if (oldseq->last_value >= last_value + SEQUENCE_REPLICATION_MIN_CACHE / 2) continue; newtup = heap_copytuple(tuple); newseq = (SeqStateTuple *) GETSTRUCT(newtup); /* Consumed more than half of cache of the sequence. */ if (newseq->last_value + newseq->cache_size / 2 < last_value) ret = false; /* The sequence is consumed too fast, increase the buffer cache. 
*/ if (newseq->last_value + newseq->cache_size <= last_value) newseq->cache_size = Min(SEQUENCE_REPLICATION_MAX_CACHE, newseq->cache_size * 2); newseq->last_value = last_value + newseq->cache_size; simple_heap_update(rel, &tuple->t_self, newtup); repsets = get_seq_replication_sets(local_node->node->id, oldseq->seqoid); repset_names = NIL; foreach (lc, repsets) { PGLogicalRepSet *repset = (PGLogicalRepSet *) lfirst(lc); repset_names = lappend(repset_names, pstrdup(repset->name)); } nspname = get_namespace_name(get_rel_namespace(oldseq->seqoid)); relname = get_rel_name(oldseq->seqoid); initStringInfo(&json); appendStringInfoString(&json, "{\"schema_name\": "); escape_json(&json, nspname); appendStringInfoString(&json, ",\"sequence_name\": "); escape_json(&json, relname); appendStringInfo(&json, ",\"last_value\": \""INT64_FORMAT"\"", newseq->last_value); appendStringInfo(&json, "}"); queue_message(repset_names, GetUserId(), QUEUE_COMMAND_TYPE_SEQUENCE, json.data); } /* Cleanup */ systable_endscan(scan); table_close(rel, NoLock); CommitTransactionCommand(); return ret; } /* * Process sequence updates. */ void synchronize_sequence(Oid seqoid) { RangeVar *rv; Relation rel; Relation seqrel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[1]; SeqStateTuple *newseq; int64 last_value; HeapTuple newtup; List *repsets; List *repset_names; ListCell *lc; char *nspname; char *relname; StringInfoData json; PGLogicalLocalNode *local_node = get_local_node(true, false); /* Check if the oid points to actual sequence. */ seqrel = table_open(seqoid, AccessShareLock); if (seqrel->rd_rel->relkind != RELKIND_SEQUENCE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a sequence", RelationGetRelationName(seqrel)))); /* Now search for it in our tracking table. 
*/ rv = makeRangeVar(EXTENSION_NAME, CATALOG_SEQUENCE_STATE, -1); rel = table_openrv(rv, RowExclusiveLock); ScanKeyInit(&key[0], Anum_sequence_state_seqoid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(seqoid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); tuple = systable_getnext(scan); if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("\"%s\" is not a replicated sequence", RelationGetRelationName(seqrel)))); newtup = heap_copytuple(tuple); newseq = (SeqStateTuple *) GETSTRUCT(newtup); last_value = sequence_get_last_value(seqoid); newseq->last_value = last_value + newseq->cache_size; simple_heap_update(rel, &tuple->t_self, newtup); repsets = get_seq_replication_sets(local_node->node->id, seqoid); repset_names = NIL; foreach (lc, repsets) { PGLogicalRepSet *repset = (PGLogicalRepSet *) lfirst(lc); repset_names = lappend(repset_names, pstrdup(repset->name)); } nspname = get_namespace_name(RelationGetNamespace(seqrel)); relname = RelationGetRelationName(seqrel); initStringInfo(&json); appendStringInfoString(&json, "{\"schema_name\": "); escape_json(&json, nspname); appendStringInfoString(&json, ",\"sequence_name\": "); escape_json(&json, relname); appendStringInfo(&json, ",\"last_value\": \""INT64_FORMAT"\"", newseq->last_value); appendStringInfo(&json, "}"); queue_message(repset_names, GetUserId(), QUEUE_COMMAND_TYPE_SEQUENCE, json.data); /* Cleanup */ systable_endscan(scan); table_close(rel, NoLock); table_close(seqrel, AccessShareLock); } /* * Makes sure there is sequence state record for given sequence. */ void pglogical_create_sequence_state_record(Oid seqoid) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[1]; rv = makeRangeVar(EXTENSION_NAME, CATALOG_SEQUENCE_STATE, -1); rel = table_openrv(rv, RowExclusiveLock); /* Check if the state record already exists. 
*/ ScanKeyInit(&key[0], Anum_sequence_state_seqoid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(seqoid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); tuple = systable_getnext(scan); /* And if it doesn't insert it. */ if (!HeapTupleIsValid(tuple)) { Datum values[Natts_sequence_state]; bool nulls[Natts_sequence_state]; TupleDesc tupDesc = RelationGetDescr(rel); /* Form a tuple. */ memset(nulls, false, sizeof(nulls)); values[Anum_sequence_state_seqoid - 1] = ObjectIdGetDatum(seqoid); values[Anum_sequence_state_cache_size - 1] = Int32GetDatum(SEQUENCE_REPLICATION_MIN_CACHE); values[Anum_sequence_state_last_value - 1] = Int64GetDatum(sequence_get_last_value(seqoid)); tuple = heap_form_tuple(tupDesc, values, nulls); /* Insert the tuple to the catalog. */ CatalogTupleInsert(rel, tuple); } /* Cleanup. */ systable_endscan(scan); table_close(rel, RowExclusiveLock); CommandCounterIncrement(); } /* * Makes sure there isn't sequence state record for given sequence. */ void pglogical_drop_sequence_state_record(Oid seqoid) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[1]; rv = makeRangeVar(EXTENSION_NAME, CATALOG_SEQUENCE_STATE, -1); rel = table_openrv(rv, RowExclusiveLock); /* Check if the state record already exists. */ ScanKeyInit(&key[0], Anum_sequence_state_seqoid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(seqoid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); tuple = systable_getnext(scan); if (HeapTupleIsValid(tuple)) simple_heap_delete(rel, &tuple->t_self); /* Cleanup. 
*/ systable_endscan(scan); table_close(rel, RowExclusiveLock); CommandCounterIncrement(); } pglogical-REL2_4_1/pglogical_sync.c000066400000000000000000001641021415142317000173270ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_sync.c * table synchronization functions * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_sync.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include #ifdef WIN32 #include #else #include #endif #include "libpq-fe.h" #include "miscadmin.h" #include "access/genam.h" #include "access/hash.h" #include "access/heapam.h" #include "access/skey.h" #include "access/stratnum.h" #include "access/xact.h" #include "catalog/indexing.h" #include "catalog/namespace.h" #include "commands/dbcommands.h" #include "commands/tablecmds.h" #include "lib/stringinfo.h" #include "utils/memutils.h" #include "nodes/makefuncs.h" #include "nodes/parsenodes.h" #include "pgstat.h" #include "replication/origin.h" #include "storage/fd.h" #include "storage/ipc.h" #include "storage/proc.h" #include "tcop/utility.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/guc.h" #include "utils/pg_lsn.h" #include "utils/rel.h" #include "utils/resowner.h" #include "pglogical_relcache.h" #include "pglogical_repset.h" #include "pglogical_rpc.h" #include "pglogical_sync.h" #include "pglogical_worker.h" #include "pglogical.h" #define CATALOG_LOCAL_SYNC_STATUS "local_sync_status" #if PG_VERSION_NUM < 90500 #define PGDUMP_BINARY "pglogical_dump" #else #define PGDUMP_BINARY "pg_dump" #endif #define PGRESTORE_BINARY "pg_restore" #define Natts_local_sync_state 6 #define Anum_sync_kind 1 #define Anum_sync_subid 2 #define Anum_sync_nspname 3 #define Anum_sync_relname 4 #define Anum_sync_status 5 #define Anum_sync_statuslsn 6 void pglogical_sync_main(Datum main_arg); static PGLogicalSyncWorker *MySyncWorker = 
NULL; #ifdef WIN32 static int exec_cmd_win32(const char *cmd, char *cmdargv[]); #endif /* * Run a command and wait for it to exit, then return its exit code * in the same format as waitpid() including on Windows. * * Does not elog(ERROR). * * 'cmd' must be a full relative or absolute path to the executable to * start. The PATH is not searched. * * Preserves each argument in cmdargv as a discrete argument to the child * process. The first entry in cmdargv is passed as the child process's * argv[0], so the first "real" argument begins at index 1. * * Uses the current environment and working directory. * * Note that if we elog(ERROR) or elog(FATAL) here we won't kill the * child proc. */ static int exec_cmd(const char *cmd, char *cmdargv[]) { pid_t pid; int stat; /* Fire off execv in child */ fflush(stdout); fflush(stderr); #ifndef WIN32 if ((pid = fork()) == 0) { if (execv(cmd, cmdargv) < 0) { ereport(ERROR, (errmsg("could not execute \"%s\": %m", cmd))); /* We're already in the child process here, can't return */ exit(1); } } if (waitpid(pid, &stat, 0) != pid) stat = -1; #else stat = exec_cmd_win32(cmd, cmdargv); #endif return stat; } static void get_pg_executable(char *cmdname, char *cmdbuf) { uint32 version; if (find_other_exec_version(my_exec_path, cmdname, &version, cmdbuf)) elog(ERROR, "pglogical subscriber init failed to find %s relative to binary %s", cmdname, my_exec_path); if (version / 100 != PG_VERSION_NUM / 100) elog(ERROR, "pglogical subscriber init found %s with wrong major version %d.%d, expected %d.%d", cmdname, version / 100 / 100, version / 100 % 100, PG_VERSION_NUM / 100 / 100, PG_VERSION_NUM / 100 % 100); } static void dump_structure(PGLogicalSubscription *sub, const char *destfile, const char *snapshot) { char *dsn; char *err_msg; char pg_dump[MAXPGPATH]; char *cmdargv[20]; int cmdargc = 0; bool has_pgl_origin; StringInfoData s; dsn = pgl_get_connstr((char *) sub->origin_if->dsn, NULL, NULL, &err_msg); if (dsn == NULL) elog(ERROR, "invalid 
connection string \"%s\": %s", sub->origin_if->dsn, err_msg); get_pg_executable(PGDUMP_BINARY, pg_dump); cmdargv[cmdargc++] = pg_dump; /* custom format */ cmdargv[cmdargc++] = "-Fc"; /* schema only */ cmdargv[cmdargc++] = "-s"; /* snapshot */ initStringInfo(&s); appendStringInfo(&s, "--snapshot=%s", snapshot); cmdargv[cmdargc++] = pstrdup(s.data); resetStringInfo(&s); /* Dumping database, filter out our extension. */ appendStringInfo(&s, "--exclude-schema=%s", EXTENSION_NAME); cmdargv[cmdargc++] = pstrdup(s.data); resetStringInfo(&s); /* Skip the pglogical_origin if it exists locally. */ StartTransactionCommand(); has_pgl_origin = OidIsValid(LookupExplicitNamespace("pglogical_origin", true)); CommitTransactionCommand(); if (has_pgl_origin) { appendStringInfo(&s, "--exclude-schema=%s", "pglogical_origin"); cmdargv[cmdargc++] = pstrdup(s.data); resetStringInfo(&s); } /* destination file */ appendStringInfo(&s, "--file=%s", destfile); cmdargv[cmdargc++] = pstrdup(s.data); resetStringInfo(&s); /* connection string */ appendStringInfo(&s, "--dbname=%s", dsn); cmdargv[cmdargc++] = pstrdup(s.data); resetStringInfo(&s); free(dsn); cmdargv[cmdargc++] = NULL; if (exec_cmd(pg_dump, cmdargv) != 0) ereport(ERROR, (errcode_for_file_access(), errmsg("could not execute pg_dump (\"%s\"): %m", pg_dump))); } static void restore_structure(PGLogicalSubscription *sub, const char *srcfile, const char *section) { char *dsn; char *err_msg; char pg_restore[MAXPGPATH]; char *cmdargv[20]; int cmdargc = 0; StringInfoData s; dsn = pgl_get_connstr((char *) sub->target_if->dsn, NULL, "-cpglogical.subscription_schema_restore=true", &err_msg); if (dsn == NULL) elog(ERROR, "invalid connection string \"%s\": %s", sub->target_if->dsn, err_msg); get_pg_executable(PGRESTORE_BINARY, pg_restore); cmdargv[cmdargc++] = pg_restore; /* section */ if (section) { initStringInfo(&s); appendStringInfo(&s, "--section=%s", section); cmdargv[cmdargc++] = pstrdup(s.data); resetStringInfo(&s); } /* stop execution on 
any error */ cmdargv[cmdargc++] = "--exit-on-error"; /* apply everything in single tx */ cmdargv[cmdargc++] = "-1"; /* connection string */ initStringInfo(&s); appendStringInfo(&s, "--dbname=%s", dsn); cmdargv[cmdargc++] = pstrdup(s.data); free(dsn); /* source file */ cmdargv[cmdargc++] = pstrdup(srcfile); cmdargv[cmdargc++] = NULL; if (exec_cmd(pg_restore, cmdargv) != 0) ereport(ERROR, (errcode_for_file_access(), errmsg("could not execute pg_restore (\"%s\"): %m", pg_restore))); } /* * Create slot and get the exported snapshot. * * This will try to recreate slot if already exists and not active. * * The reported LSN is the confirmed flush LSN at the point the slot reached * consistency and exported its snapshot. */ static char * ensure_replication_slot_snapshot(PGconn *sql_conn, PGconn *repl_conn, char *slot_name, bool use_failover_slot, XLogRecPtr *lsn) { PGresult *res; StringInfoData query; char *snapshot; retry: initStringInfo(&query); appendStringInfo(&query, "CREATE_REPLICATION_SLOT \"%s\" LOGICAL %s%s", slot_name, "pglogical_output", use_failover_slot ? " FAILOVER" : ""); res = PQexec(repl_conn, query.data); if (PQresultStatus(res) != PGRES_TUPLES_OK) { const char *sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE); /* * If our slot already exist but is not used, it's leftover from * previous unsucessful attempt to synchronize table, try dropping * it and recreating. */ if (sqlstate && strcmp(sqlstate, "42710" /*ERRCODE_DUPLICATE_OBJECT*/) == 0 && !pglogical_remote_slot_active(sql_conn, slot_name)) { pfree(query.data); PQclear(res); pglogical_drop_remote_slot(sql_conn, slot_name); goto retry; } elog(ERROR, "could not create replication slot on provider: %s\n", PQresultErrorMessage(res)); } *lsn = DatumGetLSN(DirectFunctionCall1Coll(pg_lsn_in, InvalidOid, CStringGetDatum(PQgetvalue(res, 0, 1)))); snapshot = pstrdup(PQgetvalue(res, 0, 2)); PQclear(res); return snapshot; } /* * Get or create replication origin for a given slot. 
*/ static RepOriginId ensure_replication_origin(char *slot_name) { RepOriginId origin = replorigin_by_name(slot_name, true); if (origin == InvalidRepOriginId) origin = replorigin_create(slot_name); return origin; } /* * Transaction management for COPY. */ static void start_copy_origin_tx(PGconn *conn, const char *snapshot) { PGresult *res; char *s; const char *setup_query = "BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ ONLY;\n" "SET DATESTYLE = ISO;\n" "SET INTERVALSTYLE = POSTGRES;\n" "SET extra_float_digits TO 3;\n" "SET statement_timeout = 0;\n" "SET lock_timeout = 0;\n"; StringInfoData query; initStringInfo(&query); appendStringInfoString(&query, setup_query); if (snapshot) { s = PQescapeLiteral(conn, snapshot, strlen(snapshot)); appendStringInfo(&query, "SET TRANSACTION SNAPSHOT %s;\n", s); } res = PQexec(conn, query.data); if (PQresultStatus(res) != PGRES_COMMAND_OK) elog(ERROR, "BEGIN on origin node failed: %s", PQresultErrorMessage(res)); PQclear(res); } static void start_copy_target_tx(PGconn *conn, const char *origin_name) { PGresult *res; char *s; const char *setup_query = "BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;\n" "SET session_replication_role = 'replica';\n" "SET DATESTYLE = ISO;\n" "SET INTERVALSTYLE = POSTGRES;\n" "SET extra_float_digits TO 3;\n" "SET statement_timeout = 0;\n" "SET lock_timeout = 0;\n"; StringInfoData query; initStringInfo(&query); /* * Set correct origin if target db supports it. * We must do this before starting the transaction otherwise the status * code bellow would get much more complicated. 
*/ if (PQserverVersion(conn) >= 90500) { s = PQescapeLiteral(conn, origin_name, strlen(origin_name)); appendStringInfo(&query, "SELECT pg_catalog.pg_replication_origin_session_setup(%s);\n", s); PQfreemem(s); } appendStringInfoString(&query, setup_query); res = PQexec(conn, query.data); if (PQresultStatus(res) != PGRES_COMMAND_OK) elog(ERROR, "BEGIN on target node failed: %s", PQresultErrorMessage(res)); PQclear(res); } static void finish_copy_origin_tx(PGconn *conn) { PGresult *res; /* Close the transaction and connection on origin node. */ res = PQexec(conn, "ROLLBACK"); if (PQresultStatus(res) != PGRES_COMMAND_OK) elog(WARNING, "ROLLBACK on origin node failed: %s", PQresultErrorMessage(res)); PQclear(res); PQfinish(conn); } static void finish_copy_target_tx(PGconn *conn) { PGresult *res; /* Close the transaction and connection on target node. */ res = PQexec(conn, "COMMIT"); if (PQresultStatus(res) != PGRES_COMMAND_OK) elog(ERROR, "COMMIT on target node failed: %s", PQresultErrorMessage(res)); PQclear(res); /* * Resetting the origin explicitly before the backend exits will help * prevent races with other accesses to the same replication origin. */ if (PQserverVersion(conn) >= 90500) { res = PQexec(conn, "SELECT pg_catalog.pg_replication_origin_session_reset();\n"); if (PQresultStatus(res) != PGRES_TUPLES_OK) elog(WARNING, "Resetting session origin on target node failed: %s", PQresultErrorMessage(res)); PQclear(res); } PQfinish(conn); } static int physatt_in_attmap(PGLogicalRelation *rel, int attid) { AttrNumber i; for (i = 0; i < rel->natts; i++) if (rel->attmap[i] == attid) return i; return -1; } /* * Create list of columns for COPY based on logical relation mapping. */ static List * make_copy_attnamelist(PGLogicalRelation *rel) { List *attnamelist = NIL; TupleDesc desc = RelationGetDescr(rel->rel); int attnum; for (attnum = 0; attnum < desc->natts; attnum++) { int remoteattnum = physatt_in_attmap(rel, attnum); /* Skip dropped attributes. 
*/ if (TupleDescAttr(desc,attnum)->attisdropped) continue; if (remoteattnum < 0) continue; attnamelist = lappend(attnamelist, makeString(rel->attnames[remoteattnum])); } return attnamelist; } /* * COPY single table over wire. */ static void copy_table_data(PGconn *origin_conn, PGconn *target_conn, PGLogicalRemoteRel *remoterel, List *replication_sets) { PGLogicalRelation *rel; PGresult *res; int bytes; char *copybuf; List *attnamelist; ListCell *lc; bool first; StringInfoData query; StringInfoData attlist; MemoryContext curctx = CurrentMemoryContext, oldctx; /* Build the relation map. */ StartTransactionCommand(); oldctx = MemoryContextSwitchTo(curctx); pglogical_relation_cache_updater(remoterel); rel = pglogical_relation_open(remoterel->relid, AccessShareLock); attnamelist = make_copy_attnamelist(rel); initStringInfo(&attlist); first = true; foreach (lc, attnamelist) { char *attname = strVal(lfirst(lc)); if (first) first = false; else appendStringInfoString(&attlist, ","); appendStringInfoString(&attlist, PQescapeIdentifier(origin_conn, attname, strlen(attname))); } MemoryContextSwitchTo(oldctx); pglogical_relation_close(rel, AccessShareLock); CommitTransactionCommand(); /* Build COPY TO query. */ initStringInfo(&query); appendStringInfoString(&query, "COPY "); /* * If the table is row-filtered we need to run query over the table * to execute the filter. 
*/ if (remoterel->hasRowFilter) { StringInfoData relname; StringInfoData repsetarr; ListCell *lc; initStringInfo(&relname); appendStringInfo(&relname, "%s.%s", PQescapeIdentifier(origin_conn, remoterel->nspname, strlen(remoterel->nspname)), PQescapeIdentifier(origin_conn, remoterel->relname, strlen(remoterel->relname))); initStringInfo(&repsetarr); first = true; foreach (lc, replication_sets) { char *repset_name = lfirst(lc); if (first) first = false; else appendStringInfoChar(&repsetarr, ','); appendStringInfo(&repsetarr, "%s", PQescapeLiteral(origin_conn, repset_name, strlen(repset_name))); } appendStringInfo(&query, "(SELECT %s FROM pglogical.table_data_filtered(NULL::%s, %s::regclass, ARRAY[%s])) ", list_length(attnamelist) ? attlist.data : "*", relname.data, PQescapeLiteral(origin_conn, relname.data, relname.len), repsetarr.data); } else { /* Otherwise just copy the table. */ appendStringInfo(&query, "%s.%s ", PQescapeIdentifier(origin_conn, remoterel->nspname, strlen(remoterel->nspname)), PQescapeIdentifier(origin_conn, remoterel->relname, strlen(remoterel->relname))); if (list_length(attnamelist)) appendStringInfo(&query, "(%s) ", attlist.data); } appendStringInfoString(&query, "TO stdout"); /* Execute COPY TO. */ res = PQexec(origin_conn, query.data); if (PQresultStatus(res) != PGRES_COPY_OUT) { ereport(ERROR, (errmsg("table copy failed"), errdetail("Query '%s': %s", query.data, PQerrorMessage(origin_conn)))); } /* Build COPY FROM query. */ resetStringInfo(&query); appendStringInfo(&query, "COPY %s.%s ", PQescapeIdentifier(origin_conn, remoterel->nspname, strlen(remoterel->nspname)), PQescapeIdentifier(origin_conn, remoterel->relname, strlen(remoterel->relname))); if (list_length(attnamelist)) appendStringInfo(&query, "(%s) ", attlist.data); appendStringInfoString(&query, "FROM stdin"); /* Execute COPY FROM. 
*/ res = PQexec(target_conn, query.data); if (PQresultStatus(res) != PGRES_COPY_IN) { ereport(ERROR, (errmsg("table copy failed"), errdetail("Query '%s': %s", query.data, PQerrorMessage(origin_conn)))); } while ((bytes = PQgetCopyData(origin_conn, ©buf, false)) > 0) { if (PQputCopyData(target_conn, copybuf, bytes) != 1) { ereport(ERROR, (errmsg("writing to target table failed"), errdetail("destination connection reported: %s", PQerrorMessage(target_conn)))); } PQfreemem(copybuf); CHECK_FOR_INTERRUPTS(); } if (bytes != -1) { ereport(ERROR, (errmsg("reading from origin table failed"), errdetail("source connection returned %d: %s", bytes, PQerrorMessage(origin_conn)))); } /* Send local finish */ if (PQputCopyEnd(target_conn, NULL) != 1) { ereport(ERROR, (errmsg("sending copy-completion to destination connection failed"), errdetail("destination connection reported: %s", PQerrorMessage(target_conn)))); } PQclear(res); elog(INFO, "finished synchronization of data for table %s.%s", remoterel->nspname, remoterel->relname); } /* * Copy data from origin node to target node. * * Creates new connection to origin and target. */ static void copy_tables_data(char *sub_name, const char *origin_dsn, const char *target_dsn, const char *origin_snapshot, List *tables, List *replication_sets, const char *origin_name) { PGconn *origin_conn; PGconn *target_conn; ListCell *lc; /* Connect to origin node. */ origin_conn = pglogical_connect(origin_dsn, sub_name, "copy"); start_copy_origin_tx(origin_conn, origin_snapshot); /* Connect to target node. */ target_conn = pglogical_connect(target_dsn, sub_name, "copy"); start_copy_target_tx(target_conn, origin_name); /* Copy every table. */ foreach (lc, tables) { RangeVar *rv = lfirst(lc); PGLogicalRemoteRel *remoterel; remoterel = pg_logical_get_remote_repset_table(origin_conn, rv, replication_sets); copy_table_data(origin_conn, target_conn, remoterel, replication_sets); CHECK_FOR_INTERRUPTS(); } /* Finish the transactions and disconnect. 
*/ finish_copy_origin_tx(origin_conn); finish_copy_target_tx(target_conn); } /* * Copy data from origin node to target node. * * Creates new connection to origin and target. * * This is basically same as the copy_tables_data, but it can't be easily * merged to single function because we need to get list of tables here after * the transaction is bound to a snapshot. */ static List * copy_replication_sets_data(char *sub_name, const char *origin_dsn, const char *target_dsn, const char *origin_snapshot, List *replication_sets, const char *origin_name) { PGconn *origin_conn; PGconn *target_conn; List *tables; ListCell *lc; /* Connect to origin node. */ origin_conn = pglogical_connect(origin_dsn, sub_name, "copy"); start_copy_origin_tx(origin_conn, origin_snapshot); /* Get tables to copy from origin node. */ tables = pg_logical_get_remote_repset_tables(origin_conn, replication_sets); /* Connect to target node. */ target_conn = pglogical_connect(target_dsn, sub_name, "copy"); start_copy_target_tx(target_conn, origin_name); /* Copy every table. */ foreach (lc, tables) { PGLogicalRemoteRel *remoterel = lfirst(lc); copy_table_data(origin_conn, target_conn, remoterel, replication_sets); CHECK_FOR_INTERRUPTS(); } /* Finish the transactions and disconnect. */ finish_copy_origin_tx(origin_conn); finish_copy_target_tx(target_conn); return tables; } static void pglogical_sync_worker_cleanup(PGLogicalSubscription *sub) { PGconn *origin_conn; /* Drop the slot on the remote side. */ origin_conn = pglogical_connect(sub->origin_if->dsn, sub->name, "cleanup"); /* Wait for slot to be free. 
 */
	while (!got_SIGTERM)
	{
		int			rc;

		/* Done waiting once the remote walsender has released the slot. */
		if (!pglogical_remote_slot_active(origin_conn, sub->slot_name))
			break;

		/* Re-check once a second (1000ms timeout). */
		rc = WaitLatch(&MyProc->procLatch,
					   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
					   1000L);

		ResetLatch(&MyProc->procLatch);

		/* emergency bailout if postmaster has died */
		if (rc & WL_POSTMASTER_DEATH)
			proc_exit(1);
	}

	pglogical_drop_remote_slot(origin_conn, sub->slot_name);
	PQfinish(origin_conn);

	/* Drop the origin tracking locally. */
	if (replorigin_session_origin != InvalidRepOriginId)
	{
		replorigin_session_reset();
		/* replorigin_drop() was replaced by replorigin_drop_by_name() in PG14 */
#if PG_VERSION_NUM >= 140000
		replorigin_drop_by_name(sub->slot_name, true, true);
#else
		replorigin_drop(replorigin_session_origin, true);
#endif
		replorigin_session_origin = InvalidRepOriginId;
	}
}

/*
 * PG_ENSURE_ERROR_CLEANUP callback: run the sync worker cleanup
 * (drop remote slot and local origin) for the subscription passed
 * as a pointer Datum.
 */
static void
pglogical_sync_worker_cleanup_error_cb(int code, Datum arg)
{
	PGLogicalSubscription *sub = (PGLogicalSubscription *) DatumGetPointer(arg);

	pglogical_sync_worker_cleanup(sub);
}

/*
 * PG_ENSURE_ERROR_CLEANUP callback: remove the temporary dump file whose
 * path is passed as a C-string Datum.  An already-missing file (ENOENT)
 * is not an error.
 */
static void
pglogical_sync_tmpfile_cleanup_cb(int code, Datum arg)
{
	const char *tmpfile = DatumGetCString(arg);

	if (unlink(tmpfile) != 0 && errno != ENOENT)
		elog(WARNING, "Failed to clean up pglogical temporary dump file \"%s\" on exit/error: %m",
			 tmpfile);
}

/*
 * Perform the initial synchronization of a subscription, advancing its
 * sync status through the SYNC_STATUS_* state machine.
 */
void
pglogical_sync_subscription(PGLogicalSubscription *sub)
{
	PGLogicalSyncStatus *sync;
	XLogRecPtr	lsn;
	char		status;
	MemoryContext myctx,
				oldctx;

	/* We need our own context for keeping things between transactions. */
	myctx = AllocSetContextCreate(CurrentMemoryContext,
								  "pglogical_sync_subscription cxt",
								  ALLOCSET_DEFAULT_SIZES);

	/* Read the current sync status; copy it into our long-lived context. */
	StartTransactionCommand();
	oldctx = MemoryContextSwitchTo(myctx);
	sync = get_subscription_sync_status(sub->id, false);
	MemoryContextSwitchTo(oldctx);
	CommitTransactionCommand();

	status = sync->status;

	switch (status)
	{
		/* Already synced, nothing to do except cleanup. */
		case SYNC_STATUS_READY:
			MemoryContextDelete(myctx);
			return;
		/* We can recover from crashes during these.
*/ case SYNC_STATUS_INIT: case SYNC_STATUS_CATCHUP: break; default: elog(ERROR, "subscriber %s initialization failed during nonrecoverable step (%c), please try the setup again", sub->name, status); break; } if (status == SYNC_STATUS_INIT) { PGconn *origin_conn; PGconn *origin_conn_repl; RepOriginId originid; char *snapshot; bool use_failover_slot; elog(INFO, "initializing subscriber %s", sub->name); origin_conn = pglogical_connect(sub->origin_if->dsn, sub->name, "snap"); /* 2QPG9.6 and 2QPG11 support failover slots */ use_failover_slot = pglogical_remote_function_exists(origin_conn, "pg_catalog", "pg_create_logical_replication_slot", -1, "failover"); origin_conn_repl = pglogical_connect_replica(sub->origin_if->dsn, sub->name, "snap"); snapshot = ensure_replication_slot_snapshot(origin_conn, origin_conn_repl, sub->slot_name, use_failover_slot, &lsn); PQfinish(origin_conn); PG_ENSURE_ERROR_CLEANUP(pglogical_sync_worker_cleanup_error_cb, PointerGetDatum(sub)); { char tmpfile[MAXPGPATH]; snprintf(tmpfile, MAXPGPATH, "%s/pglogical-%d.dump", pglogical_temp_directory, MyProcPid); canonicalize_path(tmpfile); PG_ENSURE_ERROR_CLEANUP(pglogical_sync_tmpfile_cleanup_cb, CStringGetDatum(tmpfile)); { #if PG_VERSION_NUM >= 90500 Relation replorigin_rel; #endif StartTransactionCommand(); originid = ensure_replication_origin(sub->slot_name); elog(DEBUG3, "advancing origin with oid %u for forwarded row to %X/%X during subscription sync", originid, (uint32)(XactLastCommitEnd>>32), (uint32)XactLastCommitEnd); #if PG_VERSION_NUM >= 90500 replorigin_rel = table_open(ReplicationOriginRelationId, RowExclusiveLock); #endif replorigin_advance(originid, lsn, XactLastCommitEnd, true, true); #if PG_VERSION_NUM >= 90500 table_close(replorigin_rel, RowExclusiveLock); #endif CommitTransactionCommand(); if (SyncKindStructure(sync->kind)) { elog(INFO, "synchronizing structure"); status = SYNC_STATUS_STRUCTURE; StartTransactionCommand(); set_subscription_sync_status(sub->id, status); 
CommitTransactionCommand(); /* Dump structure to temp storage. */ dump_structure(sub, tmpfile, snapshot); /* Restore base pre-data structure (types, tables, etc). */ restore_structure(sub, tmpfile, "pre-data"); } /* Copy data. */ if (SyncKindData(sync->kind)) { List *tables; ListCell *lc; elog(INFO, "synchronizing data"); status = SYNC_STATUS_DATA; StartTransactionCommand(); set_subscription_sync_status(sub->id, status); CommitTransactionCommand(); tables = copy_replication_sets_data(sub->name, sub->origin_if->dsn, sub->target_if->dsn, snapshot, sub->replication_sets, sub->slot_name); /* Store info about all the synchronized tables. */ StartTransactionCommand(); foreach (lc, tables) { PGLogicalRemoteRel *remoterel = lfirst(lc); PGLogicalSyncStatus *oldsync; oldsync = get_table_sync_status(sub->id, remoterel->nspname, remoterel->relname, true); if (oldsync) { set_table_sync_status(sub->id, remoterel->nspname, remoterel->relname, SYNC_STATUS_READY, lsn); } else { PGLogicalSyncStatus newsync; newsync.kind = SYNC_KIND_FULL; newsync.subid = sub->id; namestrcpy(&newsync.nspname, remoterel->nspname); namestrcpy(&newsync.relname, remoterel->relname); newsync.status = SYNC_STATUS_READY; newsync.statuslsn = lsn; create_local_sync_status(&newsync); } } CommitTransactionCommand(); } /* Restore post-data structure (indexes, constraints, etc). 
*/ if (SyncKindStructure(sync->kind)) { elog(INFO, "synchronizing constraints"); status = SYNC_STATUS_CONSTRAINTS; StartTransactionCommand(); set_subscription_sync_status(sub->id, status); CommitTransactionCommand(); restore_structure(sub, tmpfile, "post-data"); } } PG_END_ENSURE_ERROR_CLEANUP(pglogical_sync_tmpfile_cleanup_cb, CStringGetDatum(tmpfile)); pglogical_sync_tmpfile_cleanup_cb(0, CStringGetDatum(tmpfile)); } PG_END_ENSURE_ERROR_CLEANUP(pglogical_sync_worker_cleanup_error_cb, PointerGetDatum(sub)); PQfinish(origin_conn_repl); status = SYNC_STATUS_CATCHUP; StartTransactionCommand(); set_subscription_sync_status(sub->id, status); CommitTransactionCommand(); } if (status == SYNC_STATUS_CATCHUP) { /* Nothing to do here yet. */ status = SYNC_STATUS_READY; StartTransactionCommand(); set_subscription_sync_status(sub->id, status); CommitTransactionCommand(); elog(INFO, "finished synchronization of subscriber %s, ready to enter normal replication", sub->name); } MemoryContextDelete(myctx); } char pglogical_sync_table(PGLogicalSubscription *sub, RangeVar *table, XLogRecPtr *status_lsn) { PGconn *origin_conn_repl, *origin_conn; RepOriginId originid; char *snapshot; PGLogicalSyncStatus *sync; StartTransactionCommand(); /* Sanity check. */ sync = get_subscription_sync_status(sub->id, false); if (sync->status != SYNC_STATUS_READY) { elog(ERROR, "subscriber %s is not ready, cannot synchronzie individual tables", sub->name); } /* Check current state of the table. */ sync = get_table_sync_status(sub->id, table->schemaname, table->relname, false); *status_lsn = sync->statuslsn; /* Already synchronized, nothing to do here. */ if (sync->status == SYNC_STATUS_READY || sync->status == SYNC_STATUS_SYNCDONE) return sync->status; /* If previous sync attempt failed, we need to start from beginning. 
*/ if (sync->status != SYNC_STATUS_INIT) set_table_sync_status(sub->id, table->schemaname, table->relname, SYNC_STATUS_INIT, InvalidXLogRecPtr); CommitTransactionCommand(); origin_conn_repl = pglogical_connect_replica(sub->origin_if->dsn, sub->name, "copy"); origin_conn = pglogical_connect(sub->origin_if->dsn, sub->name, "copy_slot"); snapshot = ensure_replication_slot_snapshot(origin_conn, origin_conn_repl, sub->slot_name, false, status_lsn); PQfinish(origin_conn); /* Make sure we cleanup the slot if something goes wrong. */ PG_ENSURE_ERROR_CLEANUP(pglogical_sync_worker_cleanup_error_cb, PointerGetDatum(sub)); { #if PG_VERSION_NUM >= 90500 Relation replorigin_rel; #endif StartTransactionCommand(); originid = ensure_replication_origin(sub->slot_name); elog(DEBUG2, "advancing origin %s (oid %u) for forwarded row to %X/%X after sync error", MySubscription->slot_name, originid, (uint32)(XactLastCommitEnd>>32), (uint32)XactLastCommitEnd); #if PG_VERSION_NUM >= 90500 replorigin_rel = table_open(ReplicationOriginRelationId, RowExclusiveLock); #endif replorigin_advance(originid, *status_lsn, XactLastCommitEnd, true, true); #if PG_VERSION_NUM >= 90500 table_close(replorigin_rel, RowExclusiveLock); #endif set_table_sync_status(sub->id, table->schemaname, table->relname, SYNC_STATUS_DATA, *status_lsn); CommitTransactionCommand(); /* Copy data. */ copy_tables_data(sub->name, sub->origin_if->dsn,sub->target_if->dsn, snapshot, list_make1(table), sub->replication_sets, sub->slot_name); } PG_END_ENSURE_ERROR_CLEANUP(pglogical_sync_worker_cleanup_error_cb, PointerGetDatum(sub)); PQfinish(origin_conn_repl); return SYNC_STATUS_SYNCWAIT; } void pglogical_sync_worker_finish(void) { PGLogicalWorker *apply; /* * Commit any outstanding transaction. This is the usual case, unless * there was nothing to do for the table. */ if (IsTransactionState()) { CommitTransactionCommand(); pgstat_report_stat(false); } /* And flush all writes. 
*/ XLogFlush(GetXLogWriteRecPtr()); StartTransactionCommand(); pglogical_sync_worker_cleanup(MySubscription); CommitTransactionCommand(); /* * In case there is apply process running, it might be waiting * for the table status change so tell it to check. */ LWLockAcquire(PGLogicalCtx->lock, LW_EXCLUSIVE); apply = pglogical_apply_find(MyPGLogicalWorker->dboid, MyApplyWorker->subid); if (pglogical_worker_running(apply)) SetLatch(&apply->proc->procLatch); LWLockRelease(PGLogicalCtx->lock); elog(LOG, "finished sync of table %s.%s for subscriber %s", NameStr(MySyncWorker->nspname), NameStr(MySyncWorker->relname), MySubscription->name); } void pglogical_sync_main(Datum main_arg) { int slot = DatumGetInt32(main_arg); PGconn *streamConn; RepOriginId originid; XLogRecPtr lsn; XLogRecPtr status_lsn; StringInfoData slot_name; RangeVar *copytable = NULL; MemoryContext saved_ctx; char *tablename; char status; /* Setup shmem. */ pglogical_worker_attach(slot, PGLOGICAL_WORKER_SYNC); MySyncWorker = &MyPGLogicalWorker->worker.sync; MyApplyWorker = &MySyncWorker->apply; /* Establish signal handlers. */ pqsignal(SIGTERM, handle_sigterm); /* Attach to dsm segment. */ Assert(CurrentResourceOwner == NULL); CurrentResourceOwner = ResourceOwnerCreate(NULL, "pglogical sync"); /* Setup synchronous commit according to the user's wishes */ SetConfigOption("synchronous_commit", pglogical_synchronous_commit ? "local" : "off", PGC_BACKEND, PGC_S_OVERRIDE); /* other context? */ /* Run as replica session replication role. */ SetConfigOption("session_replication_role", "replica", PGC_SUSET, PGC_S_OVERRIDE); /* other context? */ /* * Disable function body checks during replay. That's necessary because a) * the creator of the function might have had it disabled b) the function * might be search_path dependant and we don't fix the contents of * functions. 
*/ SetConfigOption("check_function_bodies", "off", PGC_INTERNAL, PGC_S_OVERRIDE); StartTransactionCommand(); saved_ctx = MemoryContextSwitchTo(TopMemoryContext); MySubscription = get_subscription(MySyncWorker->apply.subid); MemoryContextSwitchTo(saved_ctx); CommitTransactionCommand(); copytable = makeRangeVar(NameStr(MySyncWorker->nspname), NameStr(MySyncWorker->relname), -1); tablename = quote_qualified_identifier(copytable->schemaname, copytable->relname); initStringInfo(&slot_name); appendStringInfo(&slot_name, "%s_%08x", MySubscription->slot_name, DatumGetUInt32(hash_any((unsigned char *) tablename, strlen(tablename)))); MySubscription->slot_name = slot_name.data; elog(LOG, "starting sync of table %s.%s for subscriber %s", copytable->schemaname, copytable->relname, MySubscription->name); elog(DEBUG1, "connecting to provider %s, dsn %s", MySubscription->origin_if->name, MySubscription->origin_if->dsn); /* Do the initial sync first. */ status = pglogical_sync_table(MySubscription, copytable, &status_lsn); if (status == SYNC_STATUS_SYNCDONE || status == SYNC_STATUS_READY) { pglogical_sync_worker_finish(); proc_exit(0); } /* Wait for ack from the main apply thread. */ StartTransactionCommand(); set_table_sync_status(MySubscription->id, copytable->schemaname, copytable->relname, SYNC_STATUS_SYNCWAIT, status_lsn); CommitTransactionCommand(); wait_for_sync_status_change(MySubscription->id, copytable->schemaname, copytable->relname, SYNC_STATUS_CATCHUP, &lsn); Assert(lsn == status_lsn); /* Setup the origin and get the starting position for the replication. */ StartTransactionCommand(); originid = replorigin_by_name(MySubscription->slot_name, false); elog(DEBUG2, "setting origin %s (oid %u) for subscription sync", MySubscription->slot_name, originid); replorigin_session_setup(originid); replorigin_session_origin = originid; Assert(status_lsn == replorigin_session_get_progress(false)); /* * In case there is nothing to catchup, finish immediately. 
* Note pglogical_sync_worker_finish() will commit. */ if (status_lsn >= MyApplyWorker->replay_stop_lsn) { /* Mark local table as done. */ set_table_sync_status(MyApplyWorker->subid, NameStr(MyPGLogicalWorker->worker.sync.nspname), NameStr(MyPGLogicalWorker->worker.sync.relname), SYNC_STATUS_SYNCDONE, status_lsn); pglogical_sync_worker_finish(); proc_exit(0); } CommitTransactionCommand(); /* Start the replication. */ streamConn = pglogical_connect_replica(MySubscription->origin_if->dsn, MySubscription->name, "catchup"); /* * IDENTIFY_SYSTEM sets up some internal state on walsender so call it even * if we don't (yet) want to use any of the results. */ pglogical_identify_system(streamConn, NULL, NULL, NULL, NULL); pglogical_start_replication(streamConn, MySubscription->slot_name, status_lsn, "all", NULL, tablename, MySubscription->force_text_transfer); /* Leave it to standard apply code to do the replication. */ apply_work(streamConn); PQfinish(streamConn); /* * We should only get here if we received sigTERM, which in case of * sync worker is not expected. */ proc_exit(1); } /* Catalog access */ /* Create subscription sync status record in catalog. */ void create_local_sync_status(PGLogicalSyncStatus *sync) { RangeVar *rv; Relation rel; TupleDesc tupDesc; HeapTuple tup; Datum values[Natts_local_sync_state]; bool nulls[Natts_local_sync_state]; rv = makeRangeVar(EXTENSION_NAME, CATALOG_LOCAL_SYNC_STATUS, -1); rel = table_openrv(rv, RowExclusiveLock); tupDesc = RelationGetDescr(rel); /* Form a tuple. 
*/ memset(nulls, false, sizeof(nulls)); values[Anum_sync_kind - 1] = CharGetDatum(sync->kind); values[Anum_sync_subid - 1] = ObjectIdGetDatum(sync->subid); if (sync->nspname.data[0]) values[Anum_sync_nspname - 1] = NameGetDatum(&sync->nspname); else nulls[Anum_sync_nspname - 1] = true; if (sync->relname.data[0]) values[Anum_sync_relname - 1] = NameGetDatum(&sync->relname); else nulls[Anum_sync_relname - 1] = true; values[Anum_sync_status - 1] = CharGetDatum(sync->status); values[Anum_sync_statuslsn - 1] = LSNGetDatum(sync->statuslsn); tup = heap_form_tuple(tupDesc, values, nulls); /* Insert the tuple to the catalog. */ CatalogTupleInsert(rel, tup); /* Cleanup. */ heap_freetuple(tup); table_close(rel, RowExclusiveLock); } /* Remove subscription sync status record from catalog. */ void drop_subscription_sync_status(Oid subid) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[1]; rv = makeRangeVar(EXTENSION_NAME, CATALOG_LOCAL_SYNC_STATUS, -1); rel = table_openrv(rv, RowExclusiveLock); ScanKeyInit(&key[0], Anum_sync_subid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(subid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); /* Remove the tuples. */ while (HeapTupleIsValid(tuple = systable_getnext(scan))) simple_heap_delete(rel, &tuple->t_self); /* Cleanup. 
 */
	systable_endscan(scan);
	table_close(rel, RowExclusiveLock);
}

/*
 * Deform a local_sync_status catalog tuple into a freshly palloc0'd
 * PGLogicalSyncStatus.
 *
 * nspname/relname may be NULL in the catalog (the subscription-level
 * status row); in that case the corresponding Name fields are left as
 * the zeroes palloc0 produced.
 */
static PGLogicalSyncStatus *
syncstatus_fromtuple(HeapTuple tuple, TupleDesc desc)
{
	PGLogicalSyncStatus *sync;
	Datum		d;
	bool		isnull;

	sync = (PGLogicalSyncStatus *) palloc0(sizeof(PGLogicalSyncStatus));

	/* kind, subid, status and statuslsn are expected non-NULL (Asserts). */
	d = fastgetattr(tuple, Anum_sync_kind, desc, &isnull);
	Assert(!isnull);
	sync->kind = DatumGetChar(d);

	d = fastgetattr(tuple, Anum_sync_subid, desc, &isnull);
	Assert(!isnull);
	sync->subid = DatumGetObjectId(d);

	d = fastgetattr(tuple, Anum_sync_nspname, desc, &isnull);
	if (!isnull)
		namestrcpy(&sync->nspname, NameStr(*DatumGetName(d)));

	d = fastgetattr(tuple, Anum_sync_relname, desc, &isnull);
	if (!isnull)
		namestrcpy(&sync->relname, NameStr(*DatumGetName(d)));

	d = fastgetattr(tuple, Anum_sync_status, desc, &isnull);
	Assert(!isnull);
	sync->status = DatumGetChar(d);

	d = fastgetattr(tuple, Anum_sync_statuslsn, desc, &isnull);
	Assert(!isnull);
	sync->statuslsn = DatumGetLSN(d);

	return sync;
}

/* Get the sync status for a subscription.
*/ PGLogicalSyncStatus * get_subscription_sync_status(Oid subid, bool missing_ok) { PGLogicalSyncStatus *sync; RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[1]; TupleDesc tupDesc; rv = makeRangeVar(EXTENSION_NAME, CATALOG_LOCAL_SYNC_STATUS, -1); rel = table_openrv(rv, RowExclusiveLock); tupDesc = RelationGetDescr(rel); ScanKeyInit(&key[0], Anum_sync_subid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(subid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); while (HeapTupleIsValid(tuple = systable_getnext(scan))) { if (pgl_heap_attisnull(tuple, Anum_sync_nspname, NULL) && pgl_heap_attisnull(tuple, Anum_sync_relname, NULL)) break; } if (!HeapTupleIsValid(tuple)) { if (missing_ok) { systable_endscan(scan); table_close(rel, RowExclusiveLock); return NULL; } elog(ERROR, "subscription %u status not found", subid); } sync = syncstatus_fromtuple(tuple, tupDesc); systable_endscan(scan); table_close(rel, RowExclusiveLock); return sync; } /* Set the sync status for a subscription. 
*/ void set_subscription_sync_status(Oid subid, char status) { RangeVar *rv; Relation rel; TupleDesc tupDesc; SysScanDesc scan; HeapTuple oldtup, newtup; ScanKeyData key[1]; Datum values[Natts_local_sync_state]; bool nulls[Natts_local_sync_state]; bool replaces[Natts_local_sync_state]; rv = makeRangeVar(EXTENSION_NAME, CATALOG_LOCAL_SYNC_STATUS, -1); rel = table_openrv(rv, RowExclusiveLock); tupDesc = RelationGetDescr(rel); ScanKeyInit(&key[0], Anum_sync_subid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(subid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); while (HeapTupleIsValid(oldtup = systable_getnext(scan))) { if (pgl_heap_attisnull(oldtup, Anum_sync_nspname, NULL) && pgl_heap_attisnull(oldtup, Anum_sync_relname, NULL)) break; } if (!HeapTupleIsValid(oldtup)) elog(ERROR, "subscription %u status not found", subid); memset(nulls, false, sizeof(nulls)); memset(replaces, false, sizeof(replaces)); values[Anum_sync_status - 1] = CharGetDatum(status); replaces[Anum_sync_status - 1] = true; values[Anum_sync_statuslsn - 1] = LSNGetDatum(InvalidXLogRecPtr); replaces[Anum_sync_statuslsn - 1] = true; newtup = heap_modify_tuple(oldtup, tupDesc, values, nulls, replaces); /* Update the tuple in catalog. */ CatalogTupleUpdate(rel, &oldtup->t_self, newtup); /* Cleanup. */ heap_freetuple(newtup); systable_endscan(scan); table_close(rel, RowExclusiveLock); } /* Remove table sync status record from catalog. */ void drop_table_sync_status(const char *nspname, const char *relname) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[2]; rv = makeRangeVar(EXTENSION_NAME, CATALOG_LOCAL_SYNC_STATUS, -1); rel = table_openrv(rv, RowExclusiveLock); ScanKeyInit(&key[0], Anum_sync_nspname, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(nspname)); ScanKeyInit(&key[1], Anum_sync_relname, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(relname)); scan = systable_beginscan(rel, 0, true, NULL, 2, key); /* Remove the tuples. 
*/ while (HeapTupleIsValid(tuple = systable_getnext(scan))) simple_heap_delete(rel, &tuple->t_self); /* Cleanup. */ systable_endscan(scan); table_close(rel, RowExclusiveLock); } /* Remove table sync status record from catalog. */ void drop_table_sync_status_for_sub(Oid subid, const char *nspname, const char *relname) { RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[3]; rv = makeRangeVar(EXTENSION_NAME, CATALOG_LOCAL_SYNC_STATUS, -1); rel = table_openrv(rv, RowExclusiveLock); ScanKeyInit(&key[0], Anum_sync_subid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(subid)); ScanKeyInit(&key[1], Anum_sync_nspname, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(nspname)); ScanKeyInit(&key[2], Anum_sync_relname, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(relname)); scan = systable_beginscan(rel, 0, true, NULL, 3, key); /* Remove the tuples. */ while (HeapTupleIsValid(tuple = systable_getnext(scan))) simple_heap_delete(rel, &tuple->t_self); /* Cleanup. */ systable_endscan(scan); table_close(rel, RowExclusiveLock); } /* Get the sync status for a table. */ PGLogicalSyncStatus * get_table_sync_status(Oid subid, const char *nspname, const char *relname, bool missing_ok) { PGLogicalSyncStatus *sync; RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[3]; TupleDesc tupDesc; Oid idxoid = InvalidOid; List *indexes; ListCell *l; rv = makeRangeVar(EXTENSION_NAME, CATALOG_LOCAL_SYNC_STATUS, -1); rel = table_openrv(rv, RowExclusiveLock); /* Find an index we can use to scan this catalog. 
*/ indexes = RelationGetIndexList(rel); foreach (l, indexes) { Relation idx = index_open(lfirst_oid(l), AccessShareLock); if (idx->rd_index->indkey.values[0] == Anum_sync_subid && idx->rd_index->indkey.values[1] == Anum_sync_nspname && idx->rd_index->indkey.values[2] == Anum_sync_relname) { idxoid = lfirst_oid(l); index_close(idx, AccessShareLock); break; } index_close(idx, AccessShareLock); } if (!OidIsValid(idxoid)) elog(ERROR, "could not find index on local_sync_status"); list_free(indexes); tupDesc = RelationGetDescr(rel); ScanKeyInit(&key[0], Anum_sync_subid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(subid)); ScanKeyInit(&key[1], Anum_sync_nspname, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(nspname)); ScanKeyInit(&key[2], Anum_sync_relname, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(relname)); scan = systable_beginscan(rel, idxoid, true, NULL, 3, key); tuple = systable_getnext(scan); if (!HeapTupleIsValid(tuple)) { if (missing_ok) { systable_endscan(scan); table_close(rel, RowExclusiveLock); return NULL; } elog(ERROR, "subscription %u table %s.%s status not found", subid, nspname, relname); } sync = syncstatus_fromtuple(tuple, tupDesc); systable_endscan(scan); table_close(rel, RowExclusiveLock); return sync; } /* Get the sync status for a table. 
*/ List * get_unsynced_tables(Oid subid) { PGLogicalSyncStatus *sync; RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[1]; List *res = NIL; TupleDesc tupDesc; rv = makeRangeVar(EXTENSION_NAME, CATALOG_LOCAL_SYNC_STATUS, -1); rel = table_openrv(rv, RowExclusiveLock); tupDesc = RelationGetDescr(rel); ScanKeyInit(&key[0], Anum_sync_subid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(subid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); while (HeapTupleIsValid(tuple = systable_getnext(scan))) { if (pgl_heap_attisnull(tuple, Anum_sync_nspname, NULL) && pgl_heap_attisnull(tuple, Anum_sync_relname, NULL)) continue; sync = syncstatus_fromtuple(tuple, tupDesc); if (sync->status != SYNC_STATUS_READY) res = lappend(res, sync); } systable_endscan(scan); table_close(rel, RowExclusiveLock); return res; } /* Get the sync status for all tables known to subscription. */ List * get_subscription_tables(Oid subid) { PGLogicalSyncStatus *sync; RangeVar *rv; Relation rel; SysScanDesc scan; HeapTuple tuple; ScanKeyData key[1]; List *res = NIL; TupleDesc tupDesc; rv = makeRangeVar(EXTENSION_NAME, CATALOG_LOCAL_SYNC_STATUS, -1); rel = table_openrv(rv, RowExclusiveLock); tupDesc = RelationGetDescr(rel); ScanKeyInit(&key[0], Anum_sync_subid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(subid)); scan = systable_beginscan(rel, 0, true, NULL, 1, key); while (HeapTupleIsValid(tuple = systable_getnext(scan))) { if (pgl_heap_attisnull(tuple, Anum_sync_nspname, NULL) && pgl_heap_attisnull(tuple, Anum_sync_relname, NULL)) continue; sync = syncstatus_fromtuple(tuple, tupDesc); res = lappend(res, sync); } systable_endscan(scan); table_close(rel, RowExclusiveLock); return res; } /* Set the sync status for a table. 
*/ void set_table_sync_status(Oid subid, const char *nspname, const char *relname, char status, XLogRecPtr statuslsn) { RangeVar *rv; Relation rel; TupleDesc tupDesc; SysScanDesc scan; HeapTuple oldtup, newtup; ScanKeyData key[3]; Datum values[Natts_local_sync_state]; bool nulls[Natts_local_sync_state]; bool replaces[Natts_local_sync_state]; rv = makeRangeVar(EXTENSION_NAME, CATALOG_LOCAL_SYNC_STATUS, -1); rel = table_openrv(rv, RowExclusiveLock); tupDesc = RelationGetDescr(rel); ScanKeyInit(&key[0], Anum_sync_subid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(subid)); ScanKeyInit(&key[1], Anum_sync_nspname, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(nspname)); ScanKeyInit(&key[2], Anum_sync_relname, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(relname)); scan = systable_beginscan(rel, 0, true, NULL, 3, key); oldtup = systable_getnext(scan); if (!HeapTupleIsValid(oldtup)) elog(ERROR, "subscription %u table %s.%s status not found", subid, nspname, relname); memset(nulls, false, sizeof(nulls)); memset(replaces, false, sizeof(replaces)); values[Anum_sync_status - 1] = CharGetDatum(status); replaces[Anum_sync_status - 1] = true; values[Anum_sync_statuslsn - 1] = LSNGetDatum(statuslsn); replaces[Anum_sync_statuslsn - 1] = true; newtup = heap_modify_tuple(oldtup, tupDesc, values, nulls, replaces); /* Update the tuple in catalog. */ CatalogTupleUpdate(rel, &oldtup->t_self, newtup); /* Cleanup. */ heap_freetuple(newtup); systable_endscan(scan); table_close(rel, RowExclusiveLock); } /* * Wait until the table sync status has changed desired one. * * We also exit if the worker is no longer recognized as sync worker as * that means something bad happened to it. * * Care with allocations is required here since it typically runs * in TopMemoryContext. 
*/
/*
 * Poll pglogical.local_sync_status until the named table reaches
 * desired_state, the sync worker disappears, or we get SIGTERM.
 *
 * Returns true (and sets *lsn to the recorded statuslsn) when the desired
 * state was reached; false otherwise, with *lsn = InvalidXLogRecPtr.
 *
 * Must be called outside a transaction; each poll opens and commits its
 * own transaction, and we restore the caller's memory context afterwards
 * because CommitTransactionCommand switches contexts.
 */
bool
wait_for_sync_status_change(Oid subid, const char *nspname, const char *relname,
							char desired_state, XLogRecPtr *lsn)
{
	int				rc;
	MemoryContext	old_ctx = CurrentMemoryContext;
	bool			ret = false;

	*lsn = InvalidXLogRecPtr;

	Assert(!IsTransactionState());

	while (!got_SIGTERM)
	{
		PGLogicalWorker	   *worker;
		PGLogicalSyncStatus *sync;

		StartTransactionCommand();
		sync = get_table_sync_status(subid, nspname, relname, true);
		if (!sync)
		{
			/* Status row vanished; nothing to wait for. */
			CommitTransactionCommand();
			break;
		}
		if (sync->status == desired_state)
		{
			*lsn = sync->statuslsn;
			CommitTransactionCommand();
			ret = true;
			break;
		}
		CommitTransactionCommand();

		/* CommitTransactionCommand switched contexts; go back. */
		(void) MemoryContextSwitchTo(old_ctx);

		/* Check if the worker is still alive - no point waiting if it died. */
		LWLockAcquire(PGLogicalCtx->lock, LW_EXCLUSIVE);
		worker = pglogical_sync_find(MyDatabaseId, subid, nspname, relname);
		LWLockRelease(PGLogicalCtx->lock);
		if (!worker)
			break;

		/* Sleep up to 60s or until our latch is set by the sync worker. */
		rc = WaitLatch(&MyProc->procLatch,
					   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
					   60000L);
		ResetLatch(&MyProc->procLatch);

		/* emergency bailout if postmaster has died */
		if (rc & WL_POSTMASTER_DEATH)
			proc_exit(1);
	}

	(void) MemoryContextSwitchTo(old_ctx);

	return ret;
}

/*
 * Truncates the table if it exists; silently does nothing if it doesn't.
 */
void
truncate_table(char *nspname, char *relname)
{
	RangeVar	   *rv;
	Oid				relid;
	TruncateStmt   *truncate;
	StringInfoData	sql;

	rv = makeRangeVar(nspname, relname, -1);
	relid = RangeVarGetRelid(rv, AccessExclusiveLock, true);
	if (relid == InvalidOid)
		return;

	/* Build the statement text only for standard_ProcessUtility's benefit. */
	initStringInfo(&sql);
	appendStringInfo(&sql, "TRUNCATE TABLE %s",
					 quote_qualified_identifier(rv->schemaname, rv->relname));

	/* Truncate the table. */
	truncate = makeNode(TruncateStmt);
	truncate->relations = list_make1(rv);
	truncate->restart_seqs = false;
	truncate->behavior = DROP_RESTRICT;

	/*
	 * We use standard_ProcessUtility to process the truncate statement on
	 * Postgres-XL so it can also truncate the table on the remote nodes.
	 *
	 * Except for the query string, most other parameters are made-up.  This
	 * is OK for TruncateStmt, but if we ever decide to use
	 * standard_ProcessUtility for other utility statements, then we must
	 * take a careful relook.
	 */
#ifdef PGXC
	standard_ProcessUtility((Node *)truncate, sql.data,
							PROCESS_UTILITY_TOPLEVEL,
							NULL, NULL, false, NULL );
#else
	ExecuteTruncate(truncate);
#endif

	/* release memory allocated to create SQL statement */
	pfree(sql.data);

	CommandCounterIncrement();
}

/*
 * exec_cmd support for win32
 */
#ifdef WIN32

/*
 * Return formatted message from GetLastError() in a palloc'd string in the
 * current memory context, or a copy of a constant generic error string if
 * there's no recorded error state.
 */
static char *
PglGetLastWin32Error(void)
{
	LPVOID	lpMsgBuf;
	DWORD	dw = GetLastError();
	char   *pgstr = NULL;

	if (dw != ERROR_SUCCESS)
	{
		FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
					  FORMAT_MESSAGE_FROM_SYSTEM |
					  FORMAT_MESSAGE_IGNORE_INSERTS,
					  NULL,
					  dw,
					  MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
					  (LPTSTR) &lpMsgBuf,
					  0, NULL);
		pgstr = pstrdup((LPTSTR) lpMsgBuf);
		LocalFree(lpMsgBuf);
	}
	else
	{
		pgstr = pstrdup("Unknown error or no recent error");
	}

	return pgstr;
}

/*
 * Quote one argv element into cmdline so that CommandLineToArgvW will
 * recover it verbatim.
 *
 * See https://docs.microsoft.com/en-us/archive/blogs/twistylittlepassagesallalike/everyone-quotes-command-line-arguments-the-wrong-way
 * for the utterly putrid way Windows handles command line arguments, and the
 * insane lack of any inverse form of the CommandLineToArgvW function in the
 * win32 API.
 */
static void
QuoteWindowsArgvElement(StringInfo cmdline, const char *arg, bool force)
{
	if (!force && *arg != '\0' &&
		strchr(arg, ' ') == NULL && strchr(arg, '\t') == NULL &&
		strchr(arg, '\n') == NULL && strchr(arg, '\v') == NULL &&
		strchr(arg, '"') == NULL)
	{
		/* No whitespace or quotes: safe to emit as-is. */
		appendStringInfoString(cmdline, arg);
	}
	else
	{
		const char *it;

		/* Begin quoted argument */
		appendStringInfoChar(cmdline, '"');

		/*
		 * In terms of the algorithm described in CommandLineToArgvW's
		 * documentation we are now "in quotes".
		 */
		for (it = arg; *it != '\0'; it++)
		{
			unsigned int NumberBackslashes = 0;

			/*
			 * Accumulate runs of backslashes. They may or may not have
			 * special meaning depending on what follows them.
			 */
			while (*it != '\0' && *it == '\\')
			{
				++it;
				++NumberBackslashes;
			}

			if (*it == '\0')
			{
				/*
				 * Handle command line arguments ending with or consisting
				 * only of backslashes. Particularly important for Windows,
				 * given its backslash paths.
				 *
				 * We want NumberBackslashes * 2 backslashes here to prevent
				 * the final backslash from escaping the quote we'll append
				 * at the end of the argument.
				 */
				for (; NumberBackslashes > 0; NumberBackslashes--)
					appendStringInfoString(cmdline, "\\\\");
				break;
			}
			else if (*it == '"')
			{
				/*
				 * Escape all accumulated backslashes, then append an escaped
				 * quotation mark.
				 *
				 * We want NumberBackslashes * 2 + 1 backslashes to prevent
				 * the backslashes from escaping the backslash we have to
				 * append to escape the quote char that's part of the
				 * argument itself.
				 */
				for (; NumberBackslashes > 0; NumberBackslashes--)
					appendStringInfoString(cmdline, "\\\\");
				appendStringInfoString(cmdline, "\\\"");
			}
			else
			{
				/*
				 * A series of backslashes followed by something other than a
				 * double quote is not special to the CommandLineToArgvW
				 * parser in MSVCRT and must be appended literally.
				 */
				for (; NumberBackslashes > 0; NumberBackslashes--)
					appendStringInfoChar(cmdline, '\\');

				/* Finally any normal char */
				appendStringInfoChar(cmdline, *it);
			}
		}

		/* End quoted argument */
		appendStringInfoChar(cmdline, '"');

		/*
		 * In terms of the algorithm described in CommandLineToArgvW's
		 * documentation we are now "not in quotes".
		 */
	}
}

/*
 * Turn an execv-style argument vector into something that Win32's
 * CommandLineToArgvW will parse back into the original argument
 * vector.
 *
 * You'd think this would be part of the win32 API. But no...
 *
 * (This should arguably be part of libpq_fe.c, but I didn't want to expand our
 * abuse of PqExpBuffer.)
*/
/*
 * Serialize argv into cmdline, space-separated, with each element quoted by
 * QuoteWindowsArgvElement.  argv must be NULL-terminated and argv[0]
 * (the program name) must be present and non-empty.
 *
 * NOTE(review): defined `static` here, but pglogical_sync.h declares it
 * `extern` (under WIN32) — confirm the header declaration is stale or
 * reconcile the linkage.
 */
static void
QuoteWindowsArgv(StringInfo cmdline, const char * argv[])
{
	/* argv0 is required */
	Assert(*argv != NULL && **argv != '\0');
	QuoteWindowsArgvElement(cmdline, *argv, false);
	++argv;

	for (; *argv != NULL; ++argv)
	{
		appendStringInfoChar(cmdline, ' ');
		QuoteWindowsArgvElement(cmdline, *argv, false);
	}
}

/*
 * Run a process on Windows and wait for it to exit, then return its exit code.
 * Preserve argument quoting. See exec_cmd() for the function contract details.
 * This is only split out to keep all the win32 horror separate for readability.
 *
 * Don't be tempted to use Win32's _spawnv. It is not like execv. It does *not*
 * preserve the individual arguments in the vector, it concatenates them
 * without any escaping or quoting. Thus any arguments with spaces, double
 * quotes, etc will be mangled by the child process's MSVC runtime when it
 * tries to turn the argument string back into an argument vector for the main
 * function by calling CommandLineToArgv() from the C library entrypoint.
 * _spawnv is also limited to 1024 characters, not the 32767 characters
 * permitted by the underlying Win32 APIs, and that could matter for pg_dump.
 *
 * This provides something more like we'd expect from execv and waitpid(),
 * including a waitpid()-style return code with the exit code in the high
 * 8 bits of a 16 bit value. Use WEXITSTATUS() for the exit status. The
 * special value -1 is returned for a failure to launch the process,
 * wait for it, or get its exit code.
*/ static int exec_cmd_win32(const char *cmd, char *cmdargv[]) { BOOL ret; int exitcode = -1; PROCESS_INFORMATION pi; elog(DEBUG1, "trying to launch \"%s\"", cmd); /* Launch the process */ { STARTUPINFO si; StringInfoData cmdline; char *cmd_tmp; /* Deal with insane windows command line quoting */ initStringInfo(&cmdline); QuoteWindowsArgv(&cmdline, cmdargv); /* CreateProcess may scribble on the cmd string */ cmd_tmp = pstrdup(cmd); /* * STARTUPINFO contains various extra options for the process that are * not passed as CreateProcess flags, and is required. */ ZeroMemory( &si, sizeof(si) ); si.cb = sizeof(si); /* * PROCESS_INFORMATION accepts the returned process handle. */ ZeroMemory( &pi, sizeof(pi) ); ret = CreateProcess(cmd_tmp, cmdline.data, NULL /* default process attributes */, NULL /* default thread attributes */, TRUE /* handles (fds) are inherited, to match execv */, CREATE_NO_WINDOW /* process creation flags */, NULL /* inherit environment variables */, NULL /* inherit working directory */, &si, &pi); pfree(cmd_tmp); pfree(cmdline.data); } if (!ret) { char *winerr = PglGetLastWin32Error(); ereport(LOG, (errcode_for_file_access(), errmsg("failed to launch \"%s\": %s", cmd, winerr))); pfree(winerr); } else { /* * Process created. It can still fail due to DLL linkage errors, * startup problems etc, but the handle exists. * * Wait for it to exit, while responding to interrupts. Ideally we * should be able to use WaitEventSetWait here since Windows sees a * process handle much like a socket, but the Pg API for it won't * let us, so we have to DIY. */ elog(DEBUG1, "process launched, waiting"); do { ret = WaitForSingleObject( pi.hProcess, 500 /* timeout in ms */ ); /* * Note that if we elog(ERROR) or elog(FATAL) as a result of a * signal here we won't kill the child proc. 
*/ CHECK_FOR_INTERRUPTS(); if (ret == WAIT_TIMEOUT) continue; if (ret != WAIT_OBJECT_0) { char *winerr = PglGetLastWin32Error(); ereport(DEBUG1, (errcode_for_file_access(), errmsg("unexpected WaitForSingleObject() return code %d while waiting for child process \"%s\": %s", ret, cmd, winerr))); pfree(winerr); /* Try to get the exit code anyway */ } if (!GetExitCodeProcess( pi.hProcess, &exitcode)) { char *winerr = PglGetLastWin32Error(); ereport(DEBUG1, (errcode_for_file_access(), errmsg("failed to get exit code from process \"%s\": %s", cmd, winerr))); pfree(winerr); /* Give up on learning about the process's outcome */ exitcode = -1; break; } else { /* Woken up for a reason other than child process termination */ if (exitcode == STILL_ACTIVE) continue; /* * Process must've exited, so code is a value from ExitProcess, * TerminateProcess, main or WinMain. */ ereport(DEBUG1, (errmsg("process \"%s\" exited with code %d", cmd, exitcode))); /* * Adapt exit code to WEXITSTATUS form to behave like waitpid(). * * The lower 8 bits are the terminating signal, with 0 for no * signal. 
*/ exitcode = exitcode << 8; break; } } while (true); CloseHandle( pi.hProcess ); CloseHandle( pi.hThread ); } elog(DEBUG1, "exec_cmd_win32 for \"%s\" exiting with %d", cmd, exitcode); return exitcode; } #endif pglogical-REL2_4_1/pglogical_sync.h000066400000000000000000000060611415142317000173330ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_sync.h * table synchronization functions * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_sync.h * *------------------------------------------------------------------------- */ #ifndef PGLOGICAL_SYNC_H #define PGLOGICAL_SYNC_H #include "libpq-fe.h" #include "nodes/primnodes.h" #include "pglogical_node.h" typedef struct PGLogicalSyncStatus { char kind; Oid subid; NameData nspname; NameData relname; char status; XLogRecPtr statuslsn; /* remote lsn of the state change used for * synchronization coordination */ } PGLogicalSyncStatus; #define SYNC_KIND_INIT 'i' #define SYNC_KIND_FULL 'f' #define SYNC_KIND_STRUCTURE 's' #define SYNC_KIND_DATA 'd' #define SyncKindData(kind) \ (kind == SYNC_KIND_FULL || kind == SYNC_KIND_DATA) #define SyncKindStructure(kind) \ (kind == SYNC_KIND_FULL || kind == SYNC_KIND_STRUCTURE) #define SYNC_STATUS_NONE '\0' /* No sync. */ #define SYNC_STATUS_INIT 'i' /* Ask for sync. */ #define SYNC_STATUS_STRUCTURE 's' /* Sync structure */ #define SYNC_STATUS_DATA 'd' /* Data sync. */ #define SYNC_STATUS_CONSTRAINTS 'c' /* Constraint sync (post-data structure). */ #define SYNC_STATUS_SYNCWAIT 'w' /* Table sync is waiting to get OK from main thread. */ #define SYNC_STATUS_CATCHUP 'u' /* Catching up. */ #define SYNC_STATUS_SYNCDONE 'y' /* Synchronization finished (at lsn). */ #define SYNC_STATUS_READY 'r' /* Done. 
*/ extern void pglogical_sync_worker_finish(void); extern void pglogical_sync_subscription(PGLogicalSubscription *sub); extern char pglogical_sync_table(PGLogicalSubscription *sub, RangeVar *table, XLogRecPtr *status_lsn); extern void create_local_sync_status(PGLogicalSyncStatus *sync); extern void drop_subscription_sync_status(Oid subid); extern PGLogicalSyncStatus *get_subscription_sync_status(Oid subid, bool missing_ok); extern void set_subscription_sync_status(Oid subid, char status); extern void drop_table_sync_status(const char *nspname, const char *relname); extern void drop_table_sync_status_for_sub(Oid subid, const char *nspname, const char *relname); extern PGLogicalSyncStatus *get_table_sync_status(Oid subid, const char *schemaname, const char *relname, bool missing_ok); extern void set_table_sync_status(Oid subid, const char *schemaname, const char *relname, char status, XLogRecPtr status_lsn); extern List *get_unsynced_tables(Oid subid); /* For interface compat with pgl3 */ inline static void free_sync_status(PGLogicalSyncStatus *sync) { pfree(sync); } extern bool wait_for_sync_status_change(Oid subid, const char *nspname, const char *relname, char desired_state, XLogRecPtr *status_lsn); extern void truncate_table(char *nspname, char *relname); extern List *get_subscription_tables(Oid subid); #ifdef WIN32 extern void QuoteWindowsArgv(StringInfo cmdline, const char * argv[]); #endif #endif /* PGLOGICAL_SYNC_H */ pglogical-REL2_4_1/pglogical_worker.c000066400000000000000000000444771415142317000177000ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical.c * pglogical initialization and common functionality * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "libpq/libpq-be.h" #include "access/xact.h" #include 
"commands/dbcommands.h"
#include "storage/ipc.h"
#include "storage/proc.h"
#include "storage/procsignal.h"
#include "storage/procarray.h"

#include "utils/guc.h"
#include "utils/memutils.h"
#include "utils/timestamp.h"

#include "pgstat.h"

#include "pglogical_sync.h"
#include "pglogical_worker.h"

/* One pending per-subscription signal, queued until transaction COMMIT. */
typedef struct signal_worker_item
{
	Oid		subid;	/* subscription whose apply worker to signal */
	bool	kill;	/* true = SIGTERM the worker, false = just wake it */
} signal_worker_item;

/* Signals queued by pglogical_subscription_changed(), sent at COMMIT. */
static List *signal_workers = NIL;

volatile sig_atomic_t got_SIGTERM = false;

/* Shared-memory worker registry; set up by the shmem startup hook. */
PGLogicalContext	   *PGLogicalCtx = NULL;
/* This process's slot in the registry, NULL if not a pglogical worker. */
PGLogicalWorker		   *MyPGLogicalWorker = NULL;
/* Generation of our slot at attach time; checked again at detach. */
static uint16			MyPGLogicalWorkerGeneration;

static bool xacthook_signal_workers = false;
static bool xact_cb_installed = false;

static shmem_startup_hook_type prev_shmem_startup_hook = NULL;

static void pglogical_worker_detach(bool crash);
static void wait_for_worker_startup(PGLogicalWorker *worker,
									BackgroundWorkerHandle *handle);
static void signal_worker_xact_callback(XactEvent event, void *arg);

/*
 * SIGTERM handler: set the flag and poke our latch so wait loops notice.
 * Only async-signal-safe operations here.
 */
void
handle_sigterm(SIGNAL_ARGS)
{
	int save_errno = errno;

	got_SIGTERM = true;

	if (MyProc)
		SetLatch(&MyProc->procLatch);

	errno = save_errno;
}

/*
 * Find an unused worker slot: either never used, or previously crashed for
 * this database (or for no particular database).
 *
 * Returns the slot index, or -1 if all slots are taken.
 * The caller is responsible for locking PGLogicalCtx->lock.
 */
static int
find_empty_worker_slot(Oid dboid)
{
	int	i;

	Assert(LWLockHeldByMe(PGLogicalCtx->lock));

	for (i = 0; i < PGLogicalCtx->total_workers; i++)
	{
		if (PGLogicalCtx->workers[i].worker_type == PGLOGICAL_WORKER_NONE ||
			(PGLogicalCtx->workers[i].crashed_at != 0 &&
			 (PGLogicalCtx->workers[i].dboid == dboid ||
			  PGLogicalCtx->workers[i].dboid == InvalidOid)))
			return i;
	}

	return -1;
}

/*
 * Register a pglogical worker process with the postmaster.
 *
 * Returns the assigned shmem slot number.
*/
int
pglogical_worker_register(PGLogicalWorker *worker)
{
	BackgroundWorker		bgw;
	PGLogicalWorker		   *worker_shm;
	BackgroundWorkerHandle *bgw_handle;
	int						slot;
	int						next_generation;

	Assert(worker->worker_type != PGLOGICAL_WORKER_NONE);

	LWLockAcquire(PGLogicalCtx->lock, LW_EXCLUSIVE);

	slot = find_empty_worker_slot(worker->dboid);
	if (slot == -1)
	{
		LWLockRelease(PGLogicalCtx->lock);
		elog(ERROR, "could not register pglogical worker: all background worker slots are already used");
	}

	worker_shm = &PGLogicalCtx->workers[slot];

	/*
	 * Maintain a generation counter for worker registrations; see
	 * wait_for_worker_startup(...).  The counter wraps around.
	 */
	if (worker_shm->generation == PG_UINT16_MAX)
		next_generation = 0;
	else
		next_generation = worker_shm->generation + 1;

	memcpy(worker_shm, worker, sizeof(PGLogicalWorker));
	worker_shm->generation = next_generation;
	worker_shm->crashed_at = 0;
	worker_shm->proc = NULL;
	worker_shm->worker_type = worker->worker_type;

	LWLockRelease(PGLogicalCtx->lock);

	/* Build the bgworker request; entry point depends on worker type. */
	memset(&bgw, 0, sizeof(bgw));
	bgw.bgw_flags =	BGWORKER_SHMEM_ACCESS |
		BGWORKER_BACKEND_DATABASE_CONNECTION;
	bgw.bgw_start_time = BgWorkerStart_RecoveryFinished;
	snprintf(bgw.bgw_library_name, BGW_MAXLEN, EXTENSION_NAME);
	if (worker->worker_type == PGLOGICAL_WORKER_MANAGER)
	{
		snprintf(bgw.bgw_function_name, BGW_MAXLEN, "pglogical_manager_main");
		snprintf(bgw.bgw_name, BGW_MAXLEN,
				 "pglogical manager %u", worker->dboid);
	}
	else if (worker->worker_type == PGLOGICAL_WORKER_SYNC)
	{
		snprintf(bgw.bgw_function_name, BGW_MAXLEN, "pglogical_sync_main");
		/* Relname is shortened so the whole name fits in BGW_MAXLEN. */
		snprintf(bgw.bgw_name, BGW_MAXLEN,
				 "pglogical sync %*s %u:%u",
				 NAMEDATALEN - 37,
				 shorten_hash(NameStr(worker->worker.sync.relname), NAMEDATALEN - 37),
				 worker->dboid, worker->worker.sync.apply.subid);
	}
	else
	{
		snprintf(bgw.bgw_function_name, BGW_MAXLEN, "pglogical_apply_main");
		snprintf(bgw.bgw_name, BGW_MAXLEN,
				 "pglogical apply %u:%u", worker->dboid,
				 worker->worker.apply.subid);
	}

	bgw.bgw_restart_time = BGW_NEVER_RESTART;
	bgw.bgw_notify_pid = MyProcPid;
	/* The worker finds its shmem slot via its main argument. */
	bgw.bgw_main_arg = ObjectIdGetDatum(slot);

	if (!RegisterDynamicBackgroundWorker(&bgw, &bgw_handle))
	{
		/* Mark the slot crashed so it can be reused. */
		worker_shm->crashed_at = GetCurrentTimestamp();
		ereport(ERROR,
				(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
				 errmsg("worker registration failed, you might want to increase max_worker_processes setting")));
	}

	wait_for_worker_startup(worker_shm, bgw_handle);

	return slot;
}

/*
 * This is our own version of WaitForBackgroundWorkerStartup where we wait
 * until the worker actually attaches to our shmem.
 */
static void
wait_for_worker_startup(PGLogicalWorker *worker,
						BackgroundWorkerHandle *handle)
{
	BgwHandleStatus status;
	int				rc;
	uint16			generation = worker->generation;

	Assert(handle != NULL);

	for (;;)
	{
		pid_t pid = 0;

		CHECK_FOR_INTERRUPTS();

		if (got_SIGTERM)
		{
			elog(DEBUG1, "pglogical supervisor exiting on SIGTERM");
			proc_exit(0);
		}

		status = GetBackgroundWorkerPid(handle, &pid);

		if (status == BGWH_STARTED && pglogical_worker_running(worker))
		{
			elog(DEBUG2, "%s worker at slot %zu started with pid %d and attached to shmem",
				 pglogical_worker_type_name(worker->worker_type),
				 (worker - &PGLogicalCtx->workers[0]), pid);
			break;
		}

		if (status == BGWH_STOPPED)
		{
			/*
			 * The worker may have:
			 * - failed to launch after registration
			 * - launched then crashed/exited before attaching
			 * - launched, attached, done its work, detached cleanly and
			 *   exited before we got rescheduled
			 * - launched, attached, crashed and self-reported its crash,
			 *   then exited before we got rescheduled
			 *
			 * If it detached cleanly it will've set its worker type to
			 * PGLOGICAL_WORKER_NONE, which it can't have been at entry, so
			 * we know it must've started, attached and cleared it.
			 *
			 * However, someone else might've grabbed the slot, re-used it
			 * and not exited yet, so if the worker type is not NONE we can't
			 * tell if it's our worker that's crashed or another worker that
			 * might still be running.  We use a generation counter
			 * incremented on registration to tell the difference: if the
			 * generation counter has increased, our worker must've exited
			 * cleanly (setting the worker type back to NONE) or
			 * self-reported a crash (setting crashed_at), then the slot was
			 * re-used by another manager.
			 */
			if (worker->worker_type != PGLOGICAL_WORKER_NONE &&
				worker->generation == generation &&
				worker->crashed_at == 0)
			{
				/*
				 * The worker we launched (same generation) crashed before
				 * attaching to shmem so it didn't set crashed_at.  Mark it
				 * crashed so the slot can be re-used.
				 */
				elog(DEBUG2, "%s worker at slot %zu exited prematurely",
					 pglogical_worker_type_name(worker->worker_type),
					 (worker - &PGLogicalCtx->workers[0]));
				worker->crashed_at = GetCurrentTimestamp();
			}
			else
			{
				/*
				 * Worker exited normally or self-reported a crash and may
				 * have already been replaced.  Either way, we don't care;
				 * we're only looking for crashes before shmem attach.
				 */
				elog(DEBUG2, "%s worker at slot %zu exited before we noticed it started",
					 pglogical_worker_type_name(worker->worker_type),
					 (worker - &PGLogicalCtx->workers[0]));
			}
			break;
		}

		Assert(status == BGWH_NOT_YET_STARTED || status == BGWH_STARTED);

		rc = WaitLatch(&MyProc->procLatch,
					   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, 1000L);

		if (rc & WL_POSTMASTER_DEATH)
			proc_exit(1);

		ResetLatch(&MyProc->procLatch);
	}
}

/*
 * Cleanup function.
 *
 * Called on process exit; a non-zero exit code means a crash.
 */
static void
pglogical_worker_on_exit(int code, Datum arg)
{
	pglogical_worker_detach(code != 0);
}

/*
 * Attach the current process to its slot in PGLogicalCtx.
 *
 * Called during worker startup to inform the master the worker
 * is ready and give it access to the worker's PGPROC.
*/ void pglogical_worker_attach(int slot, PGLogicalWorkerType type) { Assert(slot >= 0); Assert(slot < PGLogicalCtx->total_workers); MyProcPort = (Port *) calloc(1, sizeof(Port)); #if PG_VERSION_NUM < 90600 set_latch_on_sigusr1 = true; #endif LWLockAcquire(PGLogicalCtx->lock, LW_EXCLUSIVE); before_shmem_exit(pglogical_worker_on_exit, (Datum) 0); MyPGLogicalWorker = &PGLogicalCtx->workers[slot]; Assert(MyPGLogicalWorker->proc == NULL); Assert(MyPGLogicalWorker->worker_type == type); MyPGLogicalWorker->proc = MyProc; MyPGLogicalWorkerGeneration = MyPGLogicalWorker->generation; elog(DEBUG2, "%s worker [%d] attaching to slot %d generation %hu", pglogical_worker_type_name(type), MyProcPid, slot, MyPGLogicalWorkerGeneration); /* * So we can find workers in valgrind output, send a Valgrind client * request to print to the Valgrind log. */ VALGRIND_PRINTF("PGLOGICAL: pglogical worker %s (%s)\n", pglogical_worker_type_name(type), MyBgworkerEntry->bgw_name); LWLockRelease(PGLogicalCtx->lock); /* Make it easy to identify our processes. */ SetConfigOption("application_name", MyBgworkerEntry->bgw_name, PGC_USERSET, PGC_S_SESSION); /* Establish signal handlers. */ BackgroundWorkerUnblockSignals(); /* Make it easy to identify our processes. */ SetConfigOption("application_name", MyBgworkerEntry->bgw_name, PGC_USERSET, PGC_S_SESSION); /* Connect to database if needed. */ if (MyPGLogicalWorker->dboid != InvalidOid) { MemoryContext oldcontext; BackgroundWorkerInitializeConnectionByOid(MyPGLogicalWorker->dboid, InvalidOid #if PG_VERSION_NUM >= 110000 , 0 /* flags */ #endif ); StartTransactionCommand(); oldcontext = MemoryContextSwitchTo(TopMemoryContext); MyProcPort->database_name = pstrdup(get_database_name(MyPGLogicalWorker->dboid)); MemoryContextSwitchTo(oldcontext); CommitTransactionCommand(); } } /* * Detach the current master process from the PGLogicalCtx. * * Called during master worker exit. */ static void pglogical_worker_detach(bool crash) { /* Nothing to detach. 
*/ if (MyPGLogicalWorker == NULL) return; LWLockAcquire(PGLogicalCtx->lock, LW_EXCLUSIVE); Assert(MyPGLogicalWorker->proc = MyProc); Assert(MyPGLogicalWorker->generation == MyPGLogicalWorkerGeneration); MyPGLogicalWorker->proc = NULL; elog(LOG, "%s worker [%d] at slot %zu generation %hu %s", pglogical_worker_type_name(MyPGLogicalWorker->worker_type), MyProcPid, MyPGLogicalWorker - &PGLogicalCtx->workers[0], MyPGLogicalWorkerGeneration, crash ? "exiting with error" : "detaching cleanly"); VALGRIND_PRINTF("PGLOGICAL: worker detaching, unclean=%d\n", crash); /* * If we crashed we need to report it. * * The crash logic only works because all of the workers are attached * to shmem and the serious crashes that we can't catch here cause * postmaster to restart whole server killing all our workers and cleaning * shmem so we start from clean state in that scenario. * * It's vital NOT to clear or change the generation field here; see * wait_for_worker_startup(...). */ if (crash) { MyPGLogicalWorker->crashed_at = GetCurrentTimestamp(); /* Manager crash, make sure supervisor notices. */ if (MyPGLogicalWorker->worker_type == PGLOGICAL_WORKER_MANAGER) PGLogicalCtx->subscriptions_changed = true; } else { /* Worker has finished work, clean up its state from shmem. */ MyPGLogicalWorker->worker_type = PGLOGICAL_WORKER_NONE; MyPGLogicalWorker->dboid = InvalidOid; } MyPGLogicalWorker = NULL; LWLockRelease(PGLogicalCtx->lock); } /* * Find the manager worker for given database. */ PGLogicalWorker * pglogical_manager_find(Oid dboid) { int i; Assert(LWLockHeldByMe(PGLogicalCtx->lock)); for (i = 0; i < PGLogicalCtx->total_workers; i++) { if (PGLogicalCtx->workers[i].worker_type == PGLOGICAL_WORKER_MANAGER && dboid == PGLogicalCtx->workers[i].dboid) return &PGLogicalCtx->workers[i]; } return NULL; } /* * Find the apply worker for given subscription. 
*/ PGLogicalWorker * pglogical_apply_find(Oid dboid, Oid subscriberid) { int i; Assert(LWLockHeldByMe(PGLogicalCtx->lock)); for (i = 0; i < PGLogicalCtx->total_workers; i++) { PGLogicalWorker *w = &PGLogicalCtx->workers[i]; if (w->worker_type == PGLOGICAL_WORKER_APPLY && dboid == w->dboid && subscriberid == w->worker.apply.subid) return w; } return NULL; } /* * Find all apply workers for given database. */ List * pglogical_apply_find_all(Oid dboid) { int i; List *res = NIL; Assert(LWLockHeldByMe(PGLogicalCtx->lock)); for (i = 0; i < PGLogicalCtx->total_workers; i++) { if (PGLogicalCtx->workers[i].worker_type == PGLOGICAL_WORKER_APPLY && dboid == PGLogicalCtx->workers[i].dboid) res = lappend(res, &PGLogicalCtx->workers[i]); } return res; } /* * Find the sync worker for given subscription and table */ PGLogicalWorker * pglogical_sync_find(Oid dboid, Oid subscriberid, const char *nspname, const char *relname) { int i; Assert(LWLockHeldByMe(PGLogicalCtx->lock)); for (i = 0; i < PGLogicalCtx->total_workers; i++) { PGLogicalWorker *w = &PGLogicalCtx->workers[i]; if (w->worker_type == PGLOGICAL_WORKER_SYNC && dboid == w->dboid && subscriberid == w->worker.apply.subid && strcmp(NameStr(w->worker.sync.nspname), nspname) == 0 && strcmp(NameStr(w->worker.sync.relname), relname) == 0) return w; } return NULL; } /* * Find the sync worker for given subscription */ List * pglogical_sync_find_all(Oid dboid, Oid subscriberid) { int i; List *res = NIL; Assert(LWLockHeldByMe(PGLogicalCtx->lock)); for (i = 0; i < PGLogicalCtx->total_workers; i++) { PGLogicalWorker *w = &PGLogicalCtx->workers[i]; if (w->worker_type == PGLOGICAL_WORKER_SYNC && dboid == w->dboid && subscriberid == w->worker.apply.subid) res = lappend(res, w); } return res; } /* * Get worker based on slot */ PGLogicalWorker * pglogical_get_worker(int slot) { Assert(LWLockHeldByMe(PGLogicalCtx->lock)); return &PGLogicalCtx->workers[slot]; } /* * Is the worker running? 
*/
/*
 * A worker is "running" once it has published its PGPROC in its slot.
 * Safe to call on NULL.
 */
bool
pglogical_worker_running(PGLogicalWorker *worker)
{
	return worker && worker->proc;
}

/*
 * SIGTERM the given worker if it is running.
 * Caller must hold PGLogicalCtx->lock.  NULL/non-running workers are ignored.
 */
void
pglogical_worker_kill(PGLogicalWorker *worker)
{
	Assert(LWLockHeldByMe(PGLogicalCtx->lock));
	if (pglogical_worker_running(worker))
	{
		elog(DEBUG2, "killing pglogical %s worker [%d] at slot %zu",
			 pglogical_worker_type_name(worker->worker_type),
			 worker->proc->pid,
			 (worker - &PGLogicalCtx->workers[0]));
		kill(worker->proc->pid, SIGTERM);
	}
}

/*
 * XactCallback: at COMMIT, deliver the signals queued by
 * pglogical_subscription_changed() -- kill or wake the affected apply
 * workers, mark subscriptions changed, and poke the manager and supervisor.
 */
static void
signal_worker_xact_callback(XactEvent event, void *arg)
{
	if (event == XACT_EVENT_COMMIT && xacthook_signal_workers)
	{
		PGLogicalWorker	   *w;
		ListCell		   *l;

		LWLockAcquire(PGLogicalCtx->lock, LW_EXCLUSIVE);

		foreach (l, signal_workers)
		{
			signal_worker_item *item = (signal_worker_item *) lfirst(l);

			w = pglogical_apply_find(MyDatabaseId, item->subid);
			if (item->kill)
				pglogical_worker_kill(w);
			else if (pglogical_worker_running(w))
			{
				/* Wake the apply worker so it re-reads sync state. */
				w->worker.apply.sync_pending = true;
				SetLatch(&w->proc->procLatch);
			}
		}

		PGLogicalCtx->subscriptions_changed = true;

		/* Signal the manager worker, if there's one */
		w = pglogical_manager_find(MyDatabaseId);
		if (pglogical_worker_running(w))
			SetLatch(&w->proc->procLatch);

		/* and signal the supervisor, for good measure */
		if (PGLogicalCtx->supervisor)
			SetLatch(&PGLogicalCtx->supervisor->procLatch);

		LWLockRelease(PGLogicalCtx->lock);

		/* Queue entries live in TopTransactionContext; drop our pointers. */
		list_free_deep(signal_workers);
		signal_workers = NIL;

		xacthook_signal_workers = false;
	}
}

/*
 * Enqueue a signal for the supervisor/manager, to be delivered at COMMIT.
*/ void pglogical_subscription_changed(Oid subid, bool kill) { if (!xact_cb_installed) { RegisterXactCallback(signal_worker_xact_callback, NULL); xact_cb_installed = true; } if (OidIsValid(subid)) { MemoryContext oldcxt; signal_worker_item *item; oldcxt = MemoryContextSwitchTo(TopTransactionContext); item = palloc(sizeof(signal_worker_item)); item->subid = subid; item->kill = kill; signal_workers = lappend(signal_workers, item); MemoryContextSwitchTo(oldcxt); } xacthook_signal_workers = true; } static size_t worker_shmem_size(int nworkers) { return offsetof(PGLogicalContext, workers) + sizeof(PGLogicalWorker) * nworkers; } /* * Init shmem needed for workers. */ static void pglogical_worker_shmem_startup(void) { bool found; int nworkers; if (prev_shmem_startup_hook != NULL) prev_shmem_startup_hook(); /* * This is kludge for Windows (Postgres does not define the GUC variable * as PGDLLIMPORT) */ nworkers = atoi(GetConfigOptionByName("max_worker_processes", NULL, false)); /* Init signaling context for the various processes. */ PGLogicalCtx = ShmemInitStruct("pglogical_context", worker_shmem_size(nworkers), &found); if (!found) { PGLogicalCtx->lock = &(GetNamedLWLockTranche("pglogical"))->lock; PGLogicalCtx->supervisor = NULL; PGLogicalCtx->subscriptions_changed = false; PGLogicalCtx->total_workers = nworkers; memset(PGLogicalCtx->workers, 0, sizeof(PGLogicalWorker) * PGLogicalCtx->total_workers); } } /* * Request shmem resources for our worker management. */ void pglogical_worker_shmem_init(void) { int nworkers; Assert(process_shared_preload_libraries_in_progress); /* * This is cludge for Windows (Postgres des not define the GUC variable * as PGDDLIMPORT) */ nworkers = atoi(GetConfigOptionByName("max_worker_processes", NULL, false)); /* Allocate enough shmem for the worker limit ... 
*/ RequestAddinShmemSpace(worker_shmem_size(nworkers)); /* * We'll need to be able to take exclusive locks so only one per-db backend * tries to allocate or free blocks from this array at once. There won't * be enough contention to make anything fancier worth doing. */ RequestNamedLWLockTranche("pglogical", 1); /* * Whether this is a first startup or crash recovery, we'll be re-initing * the bgworkers. */ PGLogicalCtx = NULL; MyPGLogicalWorker = NULL; prev_shmem_startup_hook = shmem_startup_hook; shmem_startup_hook = pglogical_worker_shmem_startup; } const char * pglogical_worker_type_name(PGLogicalWorkerType type) { switch (type) { case PGLOGICAL_WORKER_NONE: return "none"; case PGLOGICAL_WORKER_MANAGER: return "manager"; case PGLOGICAL_WORKER_APPLY: return "apply"; case PGLOGICAL_WORKER_SYNC: return "sync"; default: Assert(false); return NULL; } } pglogical-REL2_4_1/pglogical_worker.h000066400000000000000000000062131415142317000176670ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pglogical_worker.h * pglogical worker helper functions * * Copyright (c) 2015, PostgreSQL Global Development Group * * IDENTIFICATION * pglogical_worker.h * *------------------------------------------------------------------------- */ #ifndef PGLOGICAL_WORKER_H #define PGLOGICAL_WORKER_H #include "storage/lock.h" #include "pglogical.h" typedef enum { PGLOGICAL_WORKER_NONE, /* Unused slot. */ PGLOGICAL_WORKER_MANAGER, /* Manager. */ PGLOGICAL_WORKER_APPLY, /* Apply. */ PGLOGICAL_WORKER_SYNC /* Special type of Apply that synchronizes * one table. */ } PGLogicalWorkerType; typedef struct PGLogicalApplyWorker { Oid subid; /* Subscription id for apply worker. */ bool sync_pending; /* Is there new synchronization info pending?. */ XLogRecPtr replay_stop_lsn; /* Replay should stop here if defined. */ } PGLogicalApplyWorker; typedef struct PGLogicalSyncWorker { PGLogicalApplyWorker apply; /* Apply worker info, must be first. 
*/ NameData nspname; /* Name of the schema of table to copy if any. */ NameData relname; /* Name of the table to copy if any. */ } PGLogicalSyncWorker; typedef struct PGLogicalWorker { PGLogicalWorkerType worker_type; /* Generation counter incremented at each registration */ uint16 generation; /* Pointer to proc array. NULL if not running. */ PGPROC *proc; /* Time at which worker crashed (normally 0). */ TimestampTz crashed_at; /* Database id to connect to. */ Oid dboid; /* Type-specific worker info */ union { PGLogicalApplyWorker apply; PGLogicalSyncWorker sync; } worker; } PGLogicalWorker; typedef struct PGLogicalContext { /* Write lock. */ LWLock *lock; /* Supervisor process. */ PGPROC *supervisor; /* Signal that subscription info have changed. */ bool subscriptions_changed; /* Background workers. */ int total_workers; PGLogicalWorker workers[FLEXIBLE_ARRAY_MEMBER]; } PGLogicalContext; extern PGLogicalContext *PGLogicalCtx; extern PGLogicalWorker *MyPGLogicalWorker; extern PGLogicalApplyWorker *MyApplyWorker; extern PGLogicalSubscription *MySubscription; extern volatile sig_atomic_t got_SIGTERM; extern void handle_sigterm(SIGNAL_ARGS); extern void pglogical_subscription_changed(Oid subid, bool kill); extern void pglogical_worker_shmem_init(void); extern int pglogical_worker_register(PGLogicalWorker *worker); extern void pglogical_worker_attach(int slot, PGLogicalWorkerType type); extern PGLogicalWorker *pglogical_manager_find(Oid dboid); extern PGLogicalWorker *pglogical_apply_find(Oid dboid, Oid subscriberid); extern List *pglogical_apply_find_all(Oid dboid); extern PGLogicalWorker *pglogical_sync_find(Oid dboid, Oid subid, const char *nspname, const char *relname); extern List *pglogical_sync_find_all(Oid dboid, Oid subscriberid); extern PGLogicalWorker *pglogical_get_worker(int slot); extern bool pglogical_worker_running(PGLogicalWorker *w); extern void pglogical_worker_kill(PGLogicalWorker *worker); extern const char * 
pglogical_worker_type_name(PGLogicalWorkerType type); #endif /* PGLOGICAL_WORKER_H */ pglogical-REL2_4_1/regress-pg_hba.conf000066400000000000000000000114111415142317000177170ustar00rootroot00000000000000# PostgreSQL Client Authentication Configuration File # =================================================== # # Refer to the "Client Authentication" section in the PostgreSQL # documentation for a complete description of this file. A short # synopsis follows. # # This file controls: which hosts are allowed to connect, how clients # are authenticated, which PostgreSQL user names they can use, which # databases they can access. Records take one of these forms: # # local DATABASE USER METHOD [OPTIONS] # host DATABASE USER ADDRESS METHOD [OPTIONS] # hostssl DATABASE USER ADDRESS METHOD [OPTIONS] # hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] # # (The uppercase items must be replaced by actual values.) # # The first field is the connection type: "local" is a Unix-domain # socket, "host" is either a plain or SSL-encrypted TCP/IP socket, # "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a # plain TCP/IP socket. # # DATABASE can be "all", "sameuser", "samerole", "replication", a # database name, or a comma-separated list thereof. The "all" # keyword does not match "replication". Access to replication # must be enabled in a separate record (see example below). # # USER can be "all", a user name, a group name prefixed with "+", or a # comma-separated list thereof. In both the DATABASE and USER fields # you can also write a file name prefixed with "@" to include names # from a separate file. # # ADDRESS specifies the set of hosts the record matches. It can be a # host name, or it is made up of an IP address and a CIDR mask that is # an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that # specifies the number of significant bits in the mask. A host name # that starts with a dot (.) matches a suffix of the actual host name. 
# Alternatively, you can write an IP address and netmask in separate # columns to specify the set of hosts. Instead of a CIDR-address, you # can write "samehost" to match any of the server's own IP addresses, # or "samenet" to match any address in any subnet that the server is # directly connected to. # # METHOD can be "trust", "reject", "md5", "password", "gss", "sspi", # "ident", "peer", "pam", "ldap", "radius" or "cert". Note that # "password" sends passwords in clear text; "md5" is preferred since # it sends encrypted passwords. # # OPTIONS are a set of options for the authentication in the format # NAME=VALUE. The available options depend on the different # authentication methods -- refer to the "Client Authentication" # section in the documentation for a list of which options are # available for which authentication methods. # # Database and user names containing spaces, commas, quotes and other # special characters must be quoted. Quoting one of the keywords # "all", "sameuser", "samerole" or "replication" makes the name lose # its special character, and just match a database or username with # that name. # # This file is read on server startup and when the postmaster receives # a SIGHUP signal. If you edit the file on a running system, you have # to SIGHUP the postmaster for the changes to take effect. You can # use "pg_ctl reload" to do that. # Put your actual configuration here # ---------------------------------- # # If you want to allow non-local connections, you need to add more # "host" records. In that case you will also need to make PostgreSQL # listen on a non-local interface via the listen_addresses # configuration parameter, or via the -i or -h command line switches. # CAUTION: Configuring the system for local "trust" authentication # allows any local user to connect as any PostgreSQL user, including # the database superuser. If you do not trust all your local users, # use another authentication method. 
# TYPE DATABASE USER ADDRESS METHOD # "local" is for Unix domain socket connections only local all all trust # IPv4 local connections: host all all 127.0.0.1/32 trust # IPv6 local connections: host all all ::1/128 trust # Allow replication connections from localhost, by a user with the # replication privilege. local replication super trust host replication super 127.0.0.1/32 trust host replication super ::1/128 trust local replication super2 trust host replication super2 127.0.0.1/32 trust host replication super2 ::1/128 trust local replication nonsuper trust host replication nonsuper 127.0.0.1/32 trust host replication nonsuper ::1/128 trust pglogical-REL2_4_1/regress-postgresql.conf000066400000000000000000000014761415142317000207140ustar00rootroot00000000000000# Configuration that affects behaviour being tested: shared_preload_libraries = 'pglogical' wal_level = logical max_wal_senders = 20 max_replication_slots = 20 max_worker_processes = 20 track_commit_timestamp = on # Purely testing related: hba_file = './regress-pg_hba.conf' DateStyle = 'ISO, DMY' log_line_prefix='[%m] [%p] [%d] ' fsync=off # Handy things to turn on when debugging #log_min_messages = debug2 #log_error_verbosity = verbose #log_statement = 'all' pglogical.synchronous_commit = true # Indirection of dsns for testing pglogical.orig_provider_dsn = 'dbname=sourcedb' pglogical.provider_dsn = 'dbname=regression' pglogical.provider1_dsn = 'dbname=regression1' pglogical.subscriber_dsn = 'dbname=postgres' # Uncomment to test SPI and multi-insert #pglogical.use_spi = true #pglogical.conflict_resolution = error pglogical-REL2_4_1/sql/000077500000000000000000000000001415142317000147615ustar00rootroot00000000000000pglogical-REL2_4_1/sql/add_table.sql000066400000000000000000000235671415142317000174160ustar00rootroot00000000000000/* First test whether a table's replication set can be properly manipulated */ SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn CREATE TABLE 
public.test_publicschema(id serial primary key, data text); \c :subscriber_dsn CREATE TABLE public.test_publicschema(data text, id serial primary key); \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE SCHEMA "strange.schema-IS"; CREATE TABLE public.test_nosync(id serial primary key, data text); CREATE TABLE "strange.schema-IS".test_strangeschema(id serial primary key, "S0m3th1ng" timestamptz DEFAULT '1993-01-01 00:00:00 CET'); CREATE TABLE "strange.schema-IS".test_diff_repset(id serial primary key, data text DEFAULT ''); $$); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); -- create some replication sets SELECT * FROM pglogical.create_replication_set('repset_test'); -- move tables to replication set that is not subscribed SELECT * FROM pglogical.replication_set_add_table('repset_test', 'test_publicschema'); SELECT * FROM pglogical.replication_set_add_table('repset_test', 'test_nosync'); SELECT * FROM pglogical.replication_set_add_table('repset_test', '"strange.schema-IS".test_strangeschema'); SELECT * FROM pglogical.replication_set_add_table('repset_test', '"strange.schema-IS".test_diff_repset'); SELECT * FROM pglogical.replication_set_add_all_sequences('repset_test', '{public}'); SELECT * FROM pglogical.replication_set_add_sequence('repset_test', pg_get_serial_sequence('"strange.schema-IS".test_strangeschema', 'id')); SELECT * FROM pglogical.replication_set_add_sequence('repset_test', pg_get_serial_sequence('"strange.schema-IS".test_diff_repset', 'id')); SELECT * FROM pglogical.replication_set_add_all_sequences('default', '{public}'); SELECT * FROM pglogical.replication_set_add_sequence('default', pg_get_serial_sequence('"strange.schema-IS".test_strangeschema', 'id')); SELECT * FROM pglogical.replication_set_add_sequence('default', pg_get_serial_sequence('"strange.schema-IS".test_diff_repset', 'id')); INSERT INTO public.test_publicschema(data) VALUES('a'); INSERT INTO public.test_publicschema(data) VALUES('b'); INSERT INTO 
public.test_nosync(data) VALUES('a'); INSERT INTO public.test_nosync(data) VALUES('b'); INSERT INTO "strange.schema-IS".test_strangeschema VALUES(DEFAULT, DEFAULT); INSERT INTO "strange.schema-IS".test_strangeschema VALUES(DEFAuLT, DEFAULT); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM public.test_publicschema; \c :provider_dsn -- move tables back to the subscribed replication set SELECT * FROM pglogical.replication_set_add_table('default', 'test_publicschema', true); SELECT * FROM pglogical.replication_set_add_table('default', 'test_nosync', false); SELECT * FROM pglogical.replication_set_add_table('default', '"strange.schema-IS".test_strangeschema', true); \c :subscriber_dsn SET statement_timeout = '20s'; SELECT pglogical.wait_for_table_sync_complete('test_subscription', 'test_publicschema'); SELECT pglogical.wait_for_table_sync_complete('test_subscription', '"strange.schema-IS".test_strangeschema'); RESET statement_timeout; SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status IN ('y', 'r') FROM pglogical.local_sync_status ORDER BY 2,3,4; \c :provider_dsn DO $$ -- give it 10 seconds to synchronize the tables BEGIN FOR i IN 1..100 LOOP IF (SELECT count(1) FROM pg_replication_slots) = 1 THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; SELECT count(1) FROM pg_replication_slots; INSERT INTO public.test_publicschema VALUES(3, 'c'); INSERT INTO public.test_publicschema VALUES(4, 'd'); INSERT INTO "strange.schema-IS".test_strangeschema VALUES(3, DEFAULT); INSERT INTO "strange.schema-IS".test_strangeschema VALUES(4, DEFAULT); SELECT pglogical.synchronize_sequence(c.oid) FROM pg_class c, pg_namespace n WHERE c.relkind = 'S' AND c.relnamespace = n.oid AND n.nspname IN ('public', 'strange.schema-IS'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM public.test_publicschema; SELECT * FROM "strange.schema-IS".test_strangeschema; SELECT * FROM 
pglogical.alter_subscription_synchronize('test_subscription'); BEGIN; SET statement_timeout = '20s'; SELECT pglogical.wait_for_table_sync_complete('test_subscription', 'test_nosync'); COMMIT; SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status IN ('y', 'r') FROM pglogical.local_sync_status ORDER BY 2,3,4; SELECT * FROM public.test_nosync; DELETE FROM public.test_publicschema WHERE id > 1; SELECT * FROM public.test_publicschema; SELECT * FROM pglogical.alter_subscription_resynchronize_table('test_subscription', 'test_publicschema'); BEGIN; SET statement_timeout = '20s'; SELECT pglogical.wait_for_table_sync_complete('test_subscription', 'test_publicschema'); COMMIT; SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status IN ('y', 'r') FROM pglogical.local_sync_status ORDER BY 2,3,4; SELECT * FROM public.test_publicschema; \x SELECT nspname, relname, status IN ('synchronized', 'replicating') FROM pglogical.show_subscription_table('test_subscription', 'test_publicschema'); \x BEGIN; SELECT * FROM pglogical.alter_subscription_add_replication_set('test_subscription', 'repset_test'); SELECT * FROM pglogical.alter_subscription_remove_replication_set('test_subscription', 'default'); COMMIT; DO $$ BEGIN FOR i IN 1..100 LOOP IF EXISTS (SELECT 1 FROM pglogical.show_subscription_status() WHERE status = 'replicating') THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; \c :provider_dsn SELECT * FROM pglogical.replication_set_remove_table('repset_test', '"strange.schema-IS".test_strangeschema'); INSERT INTO "strange.schema-IS".test_diff_repset VALUES(1); INSERT INTO "strange.schema-IS".test_diff_repset VALUES(2); INSERT INTO "strange.schema-IS".test_strangeschema VALUES(5, DEFAULT); INSERT INTO "strange.schema-IS".test_strangeschema VALUES(6, DEFAULT); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM "strange.schema-IS".test_diff_repset; SELECT * FROM "strange.schema-IS".test_strangeschema; \c 
:provider_dsn SELECT * FROM pglogical.alter_replication_set('repset_test', replicate_insert := false, replicate_update := false, replicate_delete := false, replicate_truncate := false); INSERT INTO "strange.schema-IS".test_diff_repset VALUES(3); INSERT INTO "strange.schema-IS".test_diff_repset VALUES(4); UPDATE "strange.schema-IS".test_diff_repset SET data = 'data'; DELETE FROM "strange.schema-IS".test_diff_repset WHERE id < 3; TRUNCATE "strange.schema-IS".test_diff_repset; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM "strange.schema-IS".test_diff_repset; \c :provider_dsn SELECT * FROM pglogical.alter_replication_set('repset_test', replicate_insert := true, replicate_truncate := true); INSERT INTO "strange.schema-IS".test_diff_repset VALUES(5); INSERT INTO "strange.schema-IS".test_diff_repset VALUES(6); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM "strange.schema-IS".test_diff_repset; \c :provider_dsn TRUNCATE "strange.schema-IS".test_diff_repset; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM "strange.schema-IS".test_diff_repset; SELECT * FROM pglogical.alter_subscription_add_replication_set('test_subscription', 'default'); DO $$ BEGIN FOR i IN 1..100 LOOP IF EXISTS (SELECT 1 FROM pglogical.show_subscription_status() WHERE status = 'replicating') THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; SELECT N.nspname AS schemaname, C.relname AS tablename, (nextval(C.oid) > 1000) as synced FROM pg_class C JOIN pg_namespace N ON (N.oid = C.relnamespace) WHERE C.relkind = 'S' AND N.nspname IN ('public', 'strange.schema-IS') ORDER BY 1, 2; \c :provider_dsn DO $$ BEGIN FOR i IN 1..100 LOOP IF EXISTS (SELECT 1 FROM pg_stat_replication) THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.test_publicschema CASCADE; DROP TABLE public.test_nosync CASCADE; DROP 
SCHEMA "strange.schema-IS" CASCADE; $$); SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.synctest(a int primary key, b text); $$); SELECT * FROM pglogical.replication_set_add_table('repset_test', 'synctest', synchronize_data := false); INSERT INTO synctest VALUES (1, '1'); -- no way to see if this worked currently, but if one can manually check -- if there is conflict in log or not (conflict = bad here) SELECT pglogical.replicate_ddl_command($$ SELECT pg_sleep(5); UPDATE public.synctest SET b = md5(a::text); $$); INSERT INTO synctest VALUES (2, '2'); \c :subscriber_dsn SELECT * FROM pglogical.alter_subscription_resynchronize_table('test_subscription', 'synctest'); BEGIN; SET statement_timeout = '20s'; SELECT pglogical.wait_for_table_sync_complete('test_subscription', 'synctest'); COMMIT; \c :provider_dsn SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); SELECT * FROM synctest; \c :subscriber_dsn SELECT * FROM synctest; \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.synctest CASCADE; $$); \c :subscriber_dsn -- this is to reorder repsets to default order BEGIN; SELECT * FROM pglogical.alter_subscription_remove_replication_set('test_subscription', 'default'); SELECT * FROM pglogical.alter_subscription_remove_replication_set('test_subscription', 'ddl_sql'); SELECT * FROM pglogical.alter_subscription_remove_replication_set('test_subscription', 'default_insert_only'); SELECT * FROM pglogical.alter_subscription_remove_replication_set('test_subscription', 'repset_test'); SELECT * FROM pglogical.alter_subscription_add_replication_set('test_subscription', 'default'); SELECT * FROM pglogical.alter_subscription_add_replication_set('test_subscription', 'default_insert_only'); SELECT * FROM pglogical.alter_subscription_add_replication_set('test_subscription', 'ddl_sql'); COMMIT; pglogical-REL2_4_1/sql/apply_delay.sql000066400000000000000000000057261415142317000200170ustar00rootroot00000000000000SELECT * FROM 
pglogical_regress_variables() \gset \c :subscriber_dsn GRANT ALL ON SCHEMA public TO nonsuper; SELECT E'\'' || current_database() || E'\'' AS subdb; \gset \c :provider_dsn SELECT * FROM pglogical.create_replication_set('delay'); \c :subscriber_dsn CREATE or REPLACE function int2interval (x integer) returns interval as $$ select $1*'1 sec'::interval $$ language sql; SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription_delay', provider_dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=super', replication_sets := '{delay}', forward_origins := '{}', synchronize_structure := false, synchronize_data := false, apply_delay := int2interval(2) -- 2 seconds ); BEGIN; SET LOCAL statement_timeout = '30s'; SELECT pglogical.wait_for_subscription_sync_complete('test_subscription_delay'); COMMIT; SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status IN ('y', 'r') FROM pglogical.local_sync_status ORDER BY 2,3,4; SELECT status FROM pglogical.show_subscription_status() WHERE subscription_name = 'test_subscription_delay'; -- Make sure we see the slot and active connection \c :provider_dsn SELECT plugin, slot_type, database, active FROM pg_replication_slots; SELECT count(*) FROM pg_stat_replication; CREATE TABLE public.timestamps ( id text primary key, ts timestamptz ); SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.basic_dml1 ( id serial primary key, other integer, data text, something interval ); $$); -- clear old applies, from any previous tests etc. 
SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); INSERT INTO timestamps VALUES ('ts1', CURRENT_TIMESTAMP); SELECT * FROM pglogical.replication_set_add_table('delay', 'basic_dml1'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); INSERT INTO timestamps VALUES ('ts2', CURRENT_TIMESTAMP); INSERT INTO basic_dml1(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); INSERT INTO timestamps VALUES ('ts3', CURRENT_TIMESTAMP); SELECT round (EXTRACT(EPOCH FROM (SELECT ts from timestamps where id = 'ts2')) - EXTRACT(EPOCH FROM (SELECT ts from timestamps where id = 'ts1'))) :: integer >= 2 as ddl_replication_delayed; SELECT round (EXTRACT(EPOCH FROM (SELECT ts from timestamps where id = 'ts3')) - EXTRACT(EPOCH FROM (SELECT ts from timestamps where id = 'ts2'))) :: integer >= 2 as inserts_replication_delayed; \c :subscriber_dsn SELECT * FROM basic_dml1; SELECT pglogical.drop_subscription('test_subscription_delay'); \c :provider_dsn \set VERBOSITY terse SELECT * FROM pglogical.drop_replication_set('delay'); DROP TABLE public.timestamps CASCADE; SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.basic_dml1 CASCADE; $$); pglogical-REL2_4_1/sql/att_list.sql000066400000000000000000000102231415142317000173230ustar00rootroot00000000000000-- basic builtin datatypes SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn CREATE TABLE public.basic_dml ( id serial primary key, other integer, data text, something interval ); -- fails as primary key is not included SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', columns := '{ data, something}'); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', columns := '{id, data, something}'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn CREATE TABLE 
public.basic_dml ( id serial primary key, data text, something interval, subonly integer, subonly_def integer DEFAULT 99 ); \c :provider_dsn -- check basic insert replication INSERT INTO basic_dml(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, data, something FROM basic_dml ORDER BY id; -- update one row \c :provider_dsn UPDATE basic_dml SET other = '4', data = NULL, something = '3 days'::interval WHERE id = 4; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, data, something FROM basic_dml ORDER BY id; -- update multiple rows \c :provider_dsn SELECT * FROM basic_dml order by id; UPDATE basic_dml SET data = data || other::text; SELECT * FROM basic_dml order by id; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, data, something FROM basic_dml ORDER BY id; \c :provider_dsn UPDATE basic_dml SET other = id, data = data || id::text; SELECT * FROM basic_dml order by id; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, data, something FROM basic_dml ORDER BY id; \c :provider_dsn UPDATE basic_dml SET other = id, something = something - '10 seconds'::interval WHERE id < 3; UPDATE basic_dml SET other = id, something = something + '10 seconds'::interval WHERE id > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, data, something, subonly, subonly_def FROM basic_dml ORDER BY id; -- delete one row \c :provider_dsn DELETE FROM basic_dml WHERE id = 2; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, data, something FROM basic_dml ORDER BY id; -- delete multiple rows \c :provider_dsn DELETE FROM basic_dml WHERE id < 4; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn 
SELECT id, data, something FROM basic_dml ORDER BY id; -- truncate \c :provider_dsn TRUNCATE basic_dml; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, data, something FROM basic_dml ORDER BY id; -- copy \c :provider_dsn \COPY basic_dml FROM STDIN WITH CSV 9000,1,aaa,1 hour 9001,2,bbb,2 years 9002,3,ccc,3 minutes 9003,4,ddd,4 days \. SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, data, something FROM basic_dml ORDER BY id; \c :provider_dsn -- drop columns being filtered at provider -- even primary key can be dropped ALTER TABLE basic_dml DROP COLUMN id; ALTER TABLE basic_dml DROP COLUMN data; \c :subscriber_dsn SELECT id, data, something FROM basic_dml ORDER BY id; \c :provider_dsn -- add column to table at provider ALTER TABLE basic_dml ADD COLUMN data1 text; INSERT INTO basic_dml(other, data1, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval); -- inserts after dropping primary key still reach the subscriber. UPDATE basic_dml set something = something - '10 seconds'::interval; DELETE FROM basic_dml WHERE other = 2; SELECT * FROM basic_dml ORDER BY other; SELECT nspname, relname, att_list, has_row_filter FROM pglogical.show_repset_table_info('basic_dml', ARRAY['default']); \c :subscriber_dsn -- verify that columns are not automatically added for filtering unless told so. 
SELECT * FROM pglogical.show_subscription_table('test_subscription', 'basic_dml'); SELECT * FROM basic_dml ORDER BY id; \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.basic_dml CASCADE; $$); pglogical-REL2_4_1/sql/basic.sql000066400000000000000000000054311415142317000165660ustar00rootroot00000000000000-- basic builtin datatypes SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.basic_dml ( id serial primary key, other integer, data text, something interval ); $$); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn ALTER TABLE public.basic_dml ADD COLUMN subonly integer; ALTER TABLE public.basic_dml ADD COLUMN subonly_def integer DEFAULT 99; \c :provider_dsn -- check basic insert replication INSERT INTO basic_dml(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, something, subonly, subonly_def FROM basic_dml ORDER BY id; -- update one row \c :provider_dsn UPDATE basic_dml SET other = '4', data = NULL, something = '3 days'::interval WHERE id = 4; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, something FROM basic_dml ORDER BY id; -- update multiple rows \c :provider_dsn UPDATE basic_dml SET other = id, data = data || id::text; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, something FROM basic_dml ORDER BY id; \c :provider_dsn UPDATE basic_dml SET other = id, something = something - '10 seconds'::interval WHERE id < 3; UPDATE basic_dml SET other = id, something = something + '10 seconds'::interval WHERE id > 3; 
SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, something, subonly, subonly_def FROM basic_dml ORDER BY id; -- delete one row \c :provider_dsn DELETE FROM basic_dml WHERE id = 2; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, something FROM basic_dml ORDER BY id; -- delete multiple rows \c :provider_dsn DELETE FROM basic_dml WHERE id < 4; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, something FROM basic_dml ORDER BY id; -- truncate \c :provider_dsn TRUNCATE basic_dml; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, something FROM basic_dml ORDER BY id; -- copy \c :provider_dsn \COPY basic_dml FROM STDIN WITH CSV 9000,1,aaa,1 hour 9001,2,bbb,2 years 9002,3,ccc,3 minutes 9003,4,ddd,4 days \. SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, something FROM basic_dml ORDER BY id; \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.basic_dml CASCADE; $$); pglogical-REL2_4_1/sql/bidirectional.sql000066400000000000000000000045001415142317000203110ustar00rootroot00000000000000 SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT E'\'' || current_database() || E'\'' AS pubdb; \gset \c :provider_dsn DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') = 904 THEN CREATE EXTENSION IF NOT EXISTS pglogical_origin; END IF; END;$$; SELECT * FROM pglogical.create_subscription( subscription_name := 'test_bidirectional', provider_dsn := (SELECT subscriber_dsn FROM pglogical_regress_variables()) || ' user=super', synchronize_structure := false, synchronize_data := false, forward_origins := '{}'); BEGIN; SET LOCAL statement_timeout = '10s'; SELECT pglogical.wait_for_subscription_sync_complete('test_bidirectional'); COMMIT; \c :subscriber_dsn 
SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.basic_dml ( id serial primary key, other integer, data text, something interval ); $$); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :provider_dsn SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml'); -- check basic insert replication INSERT INTO basic_dml(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, something FROM basic_dml ORDER BY id; UPDATE basic_dml SET other = id, something = something - '10 seconds'::interval WHERE id < 3; UPDATE basic_dml SET other = id, something = something + '10 seconds'::interval WHERE id > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :provider_dsn SELECT id, other, data, something FROM basic_dml ORDER BY id; \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.basic_dml CASCADE; $$); SELECT pglogical.drop_subscription('test_bidirectional'); SET client_min_messages = 'warning'; DROP EXTENSION IF EXISTS pglogical_origin; \c :subscriber_dsn \a SELECT slot_name FROM pg_replication_slots WHERE database = current_database(); SELECT count(*) FROM pg_stat_replication WHERE application_name = 'test_bidirectional'; pglogical-REL2_4_1/sql/column_filter.sql000066400000000000000000000124311415142317000203450ustar00rootroot00000000000000-- basic builtin datatypes SELECT * FROM pglogical_regress_variables() \gset -- create and populate table at provider \c :provider_dsn CREATE TABLE public.basic_dml ( id serial primary key, other integer, data text, something interval ); SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; INSERT INTO 
basic_dml(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); \c :subscriber_dsn -- create table on subscriber to receive replicated filtered data from provider -- there are some extra columns too, and we omit 'other' as a non-replicated -- table on upstream only. CREATE TABLE public.basic_dml ( id serial primary key, data text, something interval, subonly integer, subonly_def integer DEFAULT 99 ); SELECT nspname, relname, att_list, has_row_filter FROM pglogical.show_repset_table_info('basic_dml'::regclass, ARRAY['default']); SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; \c :provider_dsn -- Fails: the column filter list must include the key SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', synchronize_data := true, columns := '{data, something}'); SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; -- Fails: the column filter list may not include cols that are not in the table SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', synchronize_data := true, columns := '{data, something, nosuchcol}'); SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; -- At provider, add table to replication set, with filtered columns SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', synchronize_data := true, columns := '{id, data, something}'); SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; SELECT nspname, relname, att_list, has_row_filter FROM pglogical.show_repset_table_info('basic_dml'::regclass, ARRAY['default']); SELECT id, data, something FROM basic_dml ORDER BY id; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn BEGIN; SET LOCAL 
statement_timeout = '10s'; SELECT pglogical.wait_for_table_sync_complete('test_subscription', 'basic_dml'); COMMIT; SELECT nspname, relname, att_list, has_row_filter FROM pglogical.show_repset_table_info('basic_dml'::regclass, ARRAY['default']); SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; -- data should get replicated to subscriber SELECT id, data, something FROM basic_dml ORDER BY id; \c :provider_dsn -- Adding a table that's already selectively replicated fails \set VERBOSITY terse SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', synchronize_data := true); \set VERBOSITY default SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; -- So does trying to re-add to change the column set \set VERBOSITY terse SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', synchronize_data := true, columns := '{id, data}'); \set VERBOSITY default SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; -- Shouldn't be able to drop a replicated col in a rel -- but due to RM#5916 you can BEGIN; ALTER TABLE public.basic_dml DROP COLUMN data; SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; SELECT nspname, relname, att_list, has_row_filter FROM pglogical.show_repset_table_info('basic_dml'::regclass, ARRAY['default']); ROLLBACK; -- Even when wrapped (RM#5916) BEGIN; SELECT pglogical.replicate_ddl_command($$ ALTER TABLE public.basic_dml DROP COLUMN data; $$); SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; SELECT nspname, relname, att_list, has_row_filter FROM pglogical.show_repset_table_info('basic_dml'::regclass, ARRAY['default']); ROLLBACK; -- CASCADE should be allowed though BEGIN; ALTER TABLE public.basic_dml DROP COLUMN data CASCADE; SELECT nspname, relname, set_name FROM pglogical.tables 
WHERE relid = 'public.basic_dml'::regclass; SELECT nspname, relname, att_list, has_row_filter FROM pglogical.show_repset_table_info('basic_dml'::regclass, ARRAY['default']); SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; ROLLBACK; BEGIN; SELECT pglogical.replicate_ddl_command($$ ALTER TABLE public.basic_dml DROP COLUMN data CASCADE; $$); SELECT nspname, relname, att_list, has_row_filter FROM pglogical.show_repset_table_info('basic_dml'::regclass, ARRAY['default']); SELECT nspname, relname, set_name FROM pglogical.tables WHERE relid = 'public.basic_dml'::regclass; ROLLBACK; -- We can drop a non-replicated col. We must not replicate this DDL because in -- this case the downstream doesn't have the 'other' column and apply will -- fail. ALTER TABLE public.basic_dml DROP COLUMN other; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.basic_dml CASCADE; $$); pglogical-REL2_4_1/sql/conflict_secondary_unique.sql000066400000000000000000000032061415142317000227410ustar00rootroot00000000000000--PRIMARY KEY SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn -- Test conflicts where a secondary unique constraint with a predicate exits, -- ensuring we don't generate false conflicts. 
SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.secondary_unique_pred ( a integer PRIMARY KEY, b integer NOT NULL, check_unique boolean NOT NULL ); CREATE UNIQUE INDEX ON public.secondary_unique_pred (b) WHERE (check_unique); $$); SELECT * FROM pglogical.replication_set_add_table('default', 'secondary_unique_pred'); INSERT INTO secondary_unique_pred (a, b, check_unique) VALUES (1, 1, false); INSERT INTO secondary_unique_pred (a, b, check_unique) VALUES (2, 1, false); INSERT INTO secondary_unique_pred (a, b, check_unique) VALUES (3, 2, true); -- must fail INSERT INTO secondary_unique_pred (a, b, check_unique) VALUES (5, 2, true); SELECT * FROM secondary_unique_pred ORDER BY a; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM secondary_unique_pred ORDER BY a; \c :provider_dsn -- This line doesn't conflict on the provider. On the subscriber -- we must not detect a conflict on (b), since the existing local -- row matches (check_unique) but the new remote row doesn't. So -- this must get applied. INSERT INTO secondary_unique_pred (a, b, check_unique) VALUES (4, 2, false); SELECT * FROM secondary_unique_pred ORDER BY a; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM secondary_unique_pred ORDER BY a; \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.secondary_unique_pred CASCADE; $$); pglogical-REL2_4_1/sql/copy.sql000066400000000000000000000020021415142317000164460ustar00rootroot00000000000000--test COPY SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.x ( a serial primary key, b int, c text not null default 'stuff', d text, e text ); $$); SELECT * FROM pglogical.replication_set_add_table('default', 'x'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); COPY x (a, b, c, d, e) from stdin; 9999 \N \\N \NN \N 10000 21 31 41 51 \. 
COPY x (b, d) from stdin; 1 test_1 \. COPY x (b, d) from stdin; 2 test_2 3 test_3 4 test_4 5 test_5 6 test_6 7 test_7 8 test_8 9 test_9 10 test_10 11 test_11 12 test_12 13 test_13 14 test_14 15 test_15 \. COPY x (a, b, c, d, e) from stdin; 10001 22 32 42 52 10002 23 33 43 53 10003 24 34 44 54 10004 25 35 45 55 10005 26 36 46 56 \. SELECT * FROM x ORDER BY a; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM x ORDER BY a; \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.x CASCADE; $$); pglogical-REL2_4_1/sql/drop.sql000066400000000000000000000020721415142317000164470ustar00rootroot00000000000000SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT * FROM pglogical.drop_node(node_name := 'test_provider'); SELECT plugin, slot_type, active FROM pg_replication_slots; SELECT count(*) FROM pg_stat_replication; \c :subscriber_dsn SELECT * FROM pglogical.drop_subscription('test_subscription'); SELECT * FROM pglogical.drop_node(node_name := 'test_subscriber'); \c :provider_dsn SELECT * FROM pglogical.drop_node(node_name := 'test_provider'); \c :subscriber_dsn DROP OWNED BY nonsuper, super CASCADE; \c :provider_dsn DROP OWNED BY nonsuper, super CASCADE; \c :provider1_dsn DROP OWNED BY nonsuper, super CASCADE; \c :orig_provider_dsn DROP OWNED BY nonsuper, super CASCADE; \c :subscriber_dsn SET client_min_messages = 'warning'; DROP ROLE IF EXISTS nonsuper, super; \c :provider_dsn SET client_min_messages = 'warning'; DROP ROLE IF EXISTS nonsuper, super; \c :provider1_dsn SET client_min_messages = 'warning'; DROP ROLE IF EXISTS nonsuper, super; \c :orig_provider_dsn SET client_min_messages = 'warning'; DROP ROLE IF EXISTS nonsuper, super; pglogical-REL2_4_1/sql/extended.sql000066400000000000000000000473621415142317000173160ustar00rootroot00000000000000-- complex datatype handling SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT 
pglogical.replicate_ddl_command($$ CREATE TABLE public.tst_one_array ( a INTEGER PRIMARY KEY, b INTEGER[] ); CREATE TABLE public.tst_arrays ( a INTEGER[] PRIMARY KEY, b TEXT[], c FLOAT[], d INTERVAL[] ); CREATE TYPE public.tst_enum_t AS ENUM ('a', 'b', 'c', 'd', 'e'); CREATE TABLE public.tst_one_enum ( a INTEGER PRIMARY KEY, b public.tst_enum_t ); CREATE TABLE public.tst_enums ( a public.tst_enum_t PRIMARY KEY, b public.tst_enum_t[] ); CREATE TYPE public.tst_comp_basic_t AS (a FLOAT, b TEXT, c INTEGER); CREATE TYPE public.tst_comp_enum_t AS (a FLOAT, b public.tst_enum_t, c INTEGER); CREATE TYPE public.tst_comp_enum_array_t AS (a FLOAT, b public.tst_enum_t[], c INTEGER); CREATE TABLE public.tst_one_comp ( a INTEGER PRIMARY KEY, b public.tst_comp_basic_t ); CREATE TABLE public.tst_comps ( a public.tst_comp_basic_t PRIMARY KEY, b public.tst_comp_basic_t[] ); CREATE TABLE public.tst_comp_enum ( a INTEGER PRIMARY KEY, b public.tst_comp_enum_t ); CREATE TABLE public.tst_comp_enum_array ( a public.tst_comp_enum_t PRIMARY KEY, b public.tst_comp_enum_t[] ); CREATE TABLE public.tst_comp_one_enum_array ( a INTEGER PRIMARY KEY, b public.tst_comp_enum_array_t ); CREATE TABLE public.tst_comp_enum_what ( a public.tst_comp_enum_array_t PRIMARY KEY, b public.tst_comp_enum_array_t[] ); CREATE TYPE public.tst_comp_mix_t AS ( a public.tst_comp_basic_t, b public.tst_comp_basic_t[], c public.tst_enum_t, d public.tst_enum_t[] ); CREATE TABLE public.tst_comp_mix_array ( a public.tst_comp_mix_t PRIMARY KEY, b public.tst_comp_mix_t[] ); CREATE TABLE public.tst_range ( a INTEGER PRIMARY KEY, b int4range ); CREATE TABLE public.tst_range_array ( a INTEGER PRIMARY KEY, b TSTZRANGE, c int8range[] ); $$); SELECT * FROM pglogical.replication_set_add_all_tables('default', '{public}'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); -- test_tbl_one_array_col INSERT INTO tst_one_array (a, b) VALUES (1, '{1, 2, 3}'), (2, '{2, 3, 1}'), (3, '{3, 2, 1}'), (4, '{4, 3, 2}'), (5, '{5, NULL, 3}'); -- 
test_tbl_arrays INSERT INTO tst_arrays (a, b, c, d) VALUES ('{1, 2, 3}', '{"a", "b", "c"}', '{1.1, 2.2, 3.3}', '{"1 day", "2 days", "3 days"}'), ('{2, 3, 1}', '{"b", "c", "a"}', '{2.2, 3.3, 1.1}', '{"2 minutes", "3 minutes", "1 minute"}'), ('{3, 1, 2}', '{"c", "a", "b"}', '{3.3, 1.1, 2.2}', '{"3 years", "1 year", "2 years"}'), ('{4, 1, 2}', '{"d", "a", "b"}', '{4.4, 1.1, 2.2}', '{"4 years", "1 year", "2 years"}'), ('{5, NULL, NULL}', '{"e", NULL, "b"}', '{5.5, 1.1, NULL}', '{"5 years", NULL, NULL}'); -- test_tbl_single_enum INSERT INTO tst_one_enum (a, b) VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd'), (5, NULL); -- test_tbl_enums INSERT INTO tst_enums (a, b) VALUES ('a', '{b, c}'), ('b', '{c, a}'), ('c', '{b, a}'), ('d', '{c, b}'), ('e', '{d, NULL}'); -- test_tbl_single_composites INSERT INTO tst_one_comp (a, b) VALUES (1, ROW(1.0, 'a', 1)), (2, ROW(2.0, 'b', 2)), (3, ROW(3.0, 'c', 3)), (4, ROW(4.0, 'd', 4)), (5, ROW(NULL, NULL, 5)); -- test_tbl_composites INSERT INTO tst_comps (a, b) VALUES (ROW(1.0, 'a', 1), ARRAY[ROW(1, 'a', 1)::tst_comp_basic_t]), (ROW(2.0, 'b', 2), ARRAY[ROW(2, 'b', 2)::tst_comp_basic_t]), (ROW(3.0, 'c', 3), ARRAY[ROW(3, 'c', 3)::tst_comp_basic_t]), (ROW(4.0, 'd', 4), ARRAY[ROW(4, 'd', 3)::tst_comp_basic_t]), (ROW(5.0, 'e', NULL), ARRAY[NULL, ROW(5, NULL, 5)::tst_comp_basic_t]); -- test_tbl_composite_with_enums INSERT INTO tst_comp_enum (a, b) VALUES (1, ROW(1.0, 'a', 1)), (2, ROW(2.0, 'b', 2)), (3, ROW(3.0, 'c', 3)), (4, ROW(4.0, 'd', 4)), (5, ROW(NULL, 'e', NULL)); -- test_tbl_composite_with_enums_array INSERT INTO tst_comp_enum_array (a, b) VALUES (ROW(1.0, 'a', 1), ARRAY[ROW(1, 'a', 1)::tst_comp_enum_t]), (ROW(2.0, 'b', 2), ARRAY[ROW(2, 'b', 2)::tst_comp_enum_t]), (ROW(3.0, 'c', 3), ARRAY[ROW(3, 'c', 3)::tst_comp_enum_t]), (ROW(4.0, 'd', 3), ARRAY[ROW(3, 'd', 3)::tst_comp_enum_t]), (ROW(5.0, 'e', 3), ARRAY[ROW(3, 'e', 3)::tst_comp_enum_t, NULL]); -- test_tbl_composite_with_single_enums_array_in_composite INSERT INTO tst_comp_one_enum_array 
(a, b) VALUES (1, ROW(1.0, '{a, b, c}', 1)), (2, ROW(2.0, '{a, b, c}', 2)), (3, ROW(3.0, '{a, b, c}', 3)), (4, ROW(4.0, '{c, b, d}', 4)), (5, ROW(5.0, '{NULL, e, NULL}', 5)); -- test_tbl_composite_with_enums_array_in_composite INSERT INTO tst_comp_enum_what (a, b) VALUES (ROW(1.0, '{a, b, c}', 1), ARRAY[ROW(1, '{a, b, c}', 1)::tst_comp_enum_array_t]), (ROW(2.0, '{b, c, a}', 2), ARRAY[ROW(2, '{b, c, a}', 1)::tst_comp_enum_array_t]), (ROW(3.0, '{c, a, b}', 1), ARRAY[ROW(3, '{c, a, b}', 1)::tst_comp_enum_array_t]), (ROW(4.0, '{c, b, d}', 4), ARRAY[ROW(4, '{c, b, d}', 4)::tst_comp_enum_array_t]), (ROW(5.0, '{c, NULL, b}', NULL), ARRAY[ROW(5, '{c, e, b}', 1)::tst_comp_enum_array_t]); -- test_tbl_mixed_composites INSERT INTO tst_comp_mix_array (a, b) VALUES (ROW( ROW(1,'a',1), ARRAY[ROW(1,'a',1)::tst_comp_basic_t, ROW(2,'b',2)::tst_comp_basic_t], 'a', '{a,b,NULL,c}'), ARRAY[ ROW( ROW(1,'a',1), ARRAY[ ROW(1,'a',1)::tst_comp_basic_t, ROW(2,'b',2)::tst_comp_basic_t, NULL ], 'a', '{a,b,c}' )::tst_comp_mix_t ] ); -- test_tbl_range INSERT INTO tst_range (a, b) VALUES (1, '[1, 10]'), (2, '[2, 20]'), (3, '[3, 30]'), (4, '[4, 40]'), (5, '[5, 50]'); -- test_tbl_range_array INSERT INTO tst_range_array (a, b, c) VALUES (1, tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz, 'infinity'), '{"[1,2]", "[10,20]"}'), (2, tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz - interval '2 days', 'Mon Aug 04 00:00:00 2014 CEST'::timestamptz), '{"[2,3]", "[20,30]"}'), (3, tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz - interval '3 days', 'Mon Aug 04 00:00:00 2014 CEST'::timestamptz), '{"[3,4]"}'), (4, tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz - interval '4 days', 'Mon Aug 04 00:00:00 2014 CEST'::timestamptz), '{"[4,5]", NULL, "[40,50]"}'), (5, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_one_array ORDER BY a; SELECT a, b, c, d FROM tst_arrays ORDER BY a; SELECT a, b FROM tst_one_enum ORDER BY a; SELECT a, 
b FROM tst_enums ORDER BY a; SELECT a, b FROM tst_one_comp ORDER BY a; SELECT a, b FROM tst_comps ORDER BY a; SELECT a, b FROM tst_comp_enum ORDER BY a; SELECT a, b FROM tst_comp_enum_array ORDER BY a; SELECT a, b FROM tst_comp_one_enum_array ORDER BY a; SELECT a, b FROM tst_comp_enum_what ORDER BY a; SELECT a, b FROM tst_comp_mix_array ORDER BY a; SELECT a, b FROM tst_range ORDER BY a; SELECT a, b, c FROM tst_range_array ORDER BY a; -- test_tbl_one_array_col \c :provider_dsn UPDATE tst_one_array SET b = '{4, 5, 6}' WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_one_array ORDER BY a; \c :provider_dsn UPDATE tst_one_array SET b = '{4, 5, 6, 1}' WHERE a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_one_array ORDER BY a; \c :provider_dsn DELETE FROM tst_one_array WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_one_array ORDER BY a; \c :provider_dsn DELETE FROM tst_one_array WHERE b = '{2, 3, 1}'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_one_array ORDER BY a; \c :provider_dsn DELETE FROM tst_one_array WHERE 1 = ANY(b); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_one_array ORDER BY a; -- test_tbl_arrays \c :provider_dsn UPDATE tst_arrays SET b = '{"1a", "2b", "3c"}', c = '{1.0, 2.0, 3.0}', d = '{"1 day 1 second", "2 days 2 seconds", "3 days 3 second"}' WHERE a = '{1, 2, 3}'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b, c, d FROM tst_arrays ORDER BY a; \c :provider_dsn UPDATE tst_arrays SET b = '{"c", "d", "e"}', c = '{3.0, 4.0, 5.0}', d = '{"3 day 1 second", "4 days 2 seconds", "5 days 3 second"}' WHERE a[1] > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b, c, d FROM tst_arrays ORDER BY a; \c :provider_dsn DELETE FROM tst_arrays 
WHERE a = '{1, 2, 3}'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b, c, d FROM tst_arrays ORDER BY a; \c :provider_dsn DELETE FROM tst_arrays WHERE a[1] = 2; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b, c, d FROM tst_arrays ORDER BY a; \c :provider_dsn DELETE FROM tst_arrays WHERE b[1] = 'c'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b, c, d FROM tst_arrays ORDER BY a; -- test_tbl_single_enum \c :provider_dsn UPDATE tst_one_enum SET b = 'c' WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_one_enum ORDER BY a; \c :provider_dsn UPDATE tst_one_enum SET b = NULL WHERE a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_one_enum ORDER BY a; \c :provider_dsn DELETE FROM tst_one_enum WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_one_enum ORDER BY a; \c :provider_dsn DELETE FROM tst_one_enum WHERE b = 'b'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_one_enum ORDER BY a; -- test_tbl_enums \c :provider_dsn UPDATE tst_enums SET b = '{e, NULL}' WHERE a = 'a'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_enums; \c :provider_dsn UPDATE tst_enums SET b = '{e, d}' WHERE a > 'c'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_enums; \c :provider_dsn DELETE FROM tst_enums WHERE a = 'a'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_enums; \c :provider_dsn DELETE FROM tst_enums WHERE 'c' = ANY(b); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_enums; \c :provider_dsn DELETE FROM tst_enums WHERE b[1] = 'b'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT 
a, b FROM tst_enums; -- test_tbl_single_composites \c :provider_dsn UPDATE tst_one_comp SET b = ROW(1.0, 'A', 1) WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b from tst_one_comp ORDER BY a; \c :provider_dsn UPDATE tst_one_comp SET b = ROW(NULL, 'x', -1) WHERE a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b from tst_one_comp ORDER BY a; \c :provider_dsn DELETE FROM tst_one_comp WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b from tst_one_comp ORDER BY a; \c :provider_dsn DELETE FROM tst_one_comp WHERE (b).a = 2.0; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b from tst_one_comp ORDER BY a; -- test_tbl_composites \c :provider_dsn UPDATE tst_comps SET b = ARRAY[ROW(9, 'x', -1)::tst_comp_basic_t] WHERE (a).a = 1.0; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b from tst_comps ORDER BY a; \c :provider_dsn UPDATE tst_comps SET b = ARRAY[NULL, ROW(9, 'x', NULL)::tst_comp_basic_t] WHERE (a).a > 3.9; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b from tst_comps ORDER BY a; \c :provider_dsn DELETE FROM tst_comps WHERE (a).b = 'a'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_comps ORDER BY a; \c :provider_dsn DELETE FROM tst_comps WHERE (b[1]).a = 2.0; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_comps ORDER BY a; \c :provider_dsn DELETE FROM tst_comps WHERE ROW(3, 'c', 3)::tst_comp_basic_t = ANY(b); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_comps ORDER BY a; -- test_tbl_composite_with_enums \c :provider_dsn UPDATE tst_comp_enum SET b = ROW(1.0, NULL, NULL) WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b from tst_comp_enum ORDER BY a; \c 
:provider_dsn UPDATE tst_comp_enum SET b = ROW(4.0, 'd', 44) WHERE a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b from tst_comp_enum ORDER BY a; \c :provider_dsn DELETE FROM tst_comp_enum WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_comp_enum ORDER BY a; \c :provider_dsn DELETE FROM tst_comp_enum WHERE (b).a = 2.0; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_comp_enum ORDER BY a; -- test_tbl_composite_with_enums_array \c :provider_dsn UPDATE tst_comp_enum_array SET b = ARRAY[NULL, ROW(3, 'd', 3)::tst_comp_enum_t] WHERE a = ROW(1.0, 'a', 1)::tst_comp_enum_t; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b from tst_comp_enum_array ORDER BY a; \c :provider_dsn UPDATE tst_comp_enum_array SET b = ARRAY[ROW(1, 'a', 1)::tst_comp_enum_t, ROW(2, 'b', 2)::tst_comp_enum_t] WHERE (a).a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b from tst_comp_enum_array ORDER BY a; \c :provider_dsn DELETE FROM tst_comp_enum_array WHERE a = ROW(1.0, 'a', 1)::tst_comp_enum_t; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_comp_enum_array ORDER BY a; \c :provider_dsn DELETE FROM tst_comp_enum_array WHERE (b[1]).b = 'b'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_comp_enum_array ORDER BY a; \c :provider_dsn DELETE FROM tst_comp_enum_array WHERE ROW(3, 'c', 3)::tst_comp_enum_t = ANY(b); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_comp_enum_array ORDER BY a; -- test_tbl_composite_with_single_enums_array_in_composite \c :provider_dsn UPDATE tst_comp_one_enum_array SET b = ROW(1.0, '{a, e, c}', NULL) WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b from tst_comp_one_enum_array ORDER BY a; 
\c :provider_dsn UPDATE tst_comp_one_enum_array SET b = ROW(4.0, '{c, b, d}', 4) WHERE a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b from tst_comp_one_enum_array ORDER BY a; \c :provider_dsn DELETE FROM tst_comp_one_enum_array WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_comp_one_enum_array ORDER BY a; \c :provider_dsn DELETE FROM tst_comp_one_enum_array WHERE (b).c = 2; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_comp_one_enum_array ORDER BY a; \c :provider_dsn DELETE FROM tst_comp_one_enum_array WHERE 'a' = ANY((b).b); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_comp_one_enum_array ORDER BY a; -- test_tbl_composite_with_enums_array_in_composite \c :provider_dsn UPDATE tst_comp_enum_what SET b = ARRAY[NULL, ROW(1, '{a, b, c}', 1)::tst_comp_enum_array_t, ROW(NULL, '{a, e, c}', 2)::tst_comp_enum_array_t] WHERE (a).a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b from tst_comp_enum_what ORDER BY a; \c :provider_dsn UPDATE tst_comp_enum_what SET b = ARRAY[ROW(5, '{a, b, c}', 5)::tst_comp_enum_array_t] WHERE (a).a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b from tst_comp_enum_what ORDER BY a; \c :provider_dsn DELETE FROM tst_comp_enum_what WHERE (a).a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_comp_enum_what ORDER BY a; \c :provider_dsn DELETE FROM tst_comp_enum_what WHERE (b[1]).a = 2; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_comp_enum_what ORDER BY a; \c :provider_dsn DELETE FROM tst_comp_enum_what WHERE (b[1]).b = '{c, a, b}'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_comp_enum_what ORDER BY a; -- test_tbl_mixed_composites \c 
:provider_dsn UPDATE tst_comp_mix_array SET b[2] = NULL WHERE ((a).a).a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_comp_mix_array ORDER BY a; \c :provider_dsn DELETE FROM tst_comp_mix_array WHERE ((a).a).a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_comp_mix_array ORDER BY a; -- test_tbl_range \c :provider_dsn UPDATE tst_range SET b = '[100, 1000]' WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_range ORDER BY a; \c :provider_dsn UPDATE tst_range SET b = '(1, 90)' WHERE a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_range ORDER BY a; \c :provider_dsn DELETE FROM tst_range WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_range ORDER BY a; \c :provider_dsn DELETE FROM tst_range WHERE b = '[2, 20]'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_range ORDER BY a; \c :provider_dsn DELETE FROM tst_range WHERE '[10,20]' && b; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b FROM tst_range ORDER BY a; -- test_tbl_range_array \c :provider_dsn UPDATE tst_range_array SET c = '{"[100, 1000]"}' WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b, c FROM tst_range_array ORDER BY a; \c :provider_dsn UPDATE tst_range_array SET b = tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz, 'infinity'), c = '{NULL, "[11,9999999]"}' WHERE a > 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b, c FROM tst_range_array ORDER BY a; \c :provider_dsn DELETE FROM tst_range_array WHERE a = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b, c FROM tst_range_array ORDER BY a; \c :provider_dsn DELETE FROM tst_range_array WHERE b = 
tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz - interval '2 days', 'Mon Aug 04 00:00:00 2014 CEST'::timestamptz); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b, c FROM tst_range_array ORDER BY a; \c :provider_dsn DELETE FROM tst_range_array WHERE tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz, 'Mon Aug 05 00:00:00 2014 CEST'::timestamptz) && b; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT a, b, c FROM tst_range_array ORDER BY a; \c :provider_dsn -- Verify that swap_relation_files(...) breaks replication -- as invoked by CLUSTER, VACUUM FULL, or REFRESH MATERIALIZED VIEW VACUUM FULL tst_one_array; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.tst_one_array CASCADE; DROP TABLE public.tst_arrays CASCADE; DROP TABLE public.tst_one_enum CASCADE; DROP TABLE public.tst_enums CASCADE; DROP TABLE public.tst_one_comp CASCADE; DROP TABLE public.tst_comps CASCADE; DROP TABLE public.tst_comp_enum CASCADE; DROP TABLE public.tst_comp_enum_array CASCADE; DROP TABLE public.tst_comp_one_enum_array CASCADE; DROP TABLE public.tst_comp_enum_what CASCADE; DROP TABLE public.tst_comp_mix_array CASCADE; DROP TABLE public.tst_range CASCADE; DROP TABLE public.tst_range_array CASCADE; DROP TYPE public.tst_comp_mix_t; DROP TYPE public.tst_comp_enum_array_t; DROP TYPE public.tst_comp_enum_t; DROP TYPE public.tst_comp_basic_t; DROP TYPE public.tst_enum_t; $$); pglogical-REL2_4_1/sql/foreign_key.sql000066400000000000000000000023761415142317000200130ustar00rootroot00000000000000--FOREIGN KEY SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.f1k_products ( product_no integer PRIMARY KEY, product_id integer, name text, price numeric ); CREATE TABLE public.f1k_orders ( order_id integer, product_no integer REFERENCES public.f1k_products 
(product_no), quantity integer ); --pass $$); SELECT * FROM pglogical.replication_set_add_table('default', 'f1k_products'); SELECT * FROM pglogical.replication_set_add_table('default_insert_only', 'f1k_orders'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); INSERT into public.f1k_products VALUES (1, 1, 'product1', 1.20); INSERT into public.f1k_products VALUES (2, 2, 'product2', 2.40); INSERT into public.f1k_orders VALUES (300, 1, 4); INSERT into public.f1k_orders VALUES (22, 2, 14); INSERT into public.f1k_orders VALUES (23, 2, 24); INSERT into public.f1k_orders VALUES (24, 2, 40); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM public.f1k_products; SELECT * FROM public.f1k_orders; \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.f1k_orders CASCADE; DROP TABLE public.f1k_products CASCADE; $$); pglogical-REL2_4_1/sql/functions.sql000066400000000000000000000167301415142317000175210ustar00rootroot00000000000000--Immutable, volatile functions and nextval in DEFAULT clause SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE FUNCTION public.add(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; CREATE TABLE public.funct2( a integer, b integer, c integer DEFAULT public.add(10,12 ) ) ; $$); SELECT * FROM pglogical.replication_set_add_table('default_insert_only', 'public.funct2'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); INSERT INTO public.funct2(a,b) VALUES (1,2);--c should be 22 INSERT INTO public.funct2(a,b,c) VALUES (3,4,5);-- c should be 5 SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * from public.funct2; \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ create or replace function public.get_curr_century() returns double precision as 'SELECT EXTRACT(CENTURY FROM NOW());' language sql volatile; CREATE TABLE 
public.funct5( a integer, b integer, c double precision DEFAULT public.get_curr_century() ); $$); SELECT * FROM pglogical.replication_set_add_all_tables('default_insert_only', '{public}'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); INSERT INTO public.funct5(a,b) VALUES (1,2);--c should be e.g. 21 for 2015 INSERT INTO public.funct5(a,b,c) VALUES (3,4,20);-- c should be 20 SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * from public.funct5; --nextval check \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE SEQUENCE public.INSERT_SEQ; CREATE TABLE public.funct ( a integer, b INT DEFAULT nextval('public.insert_seq') ); $$); SELECT * FROM pglogical.replication_set_add_all_tables('default_insert_only', '{public}'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); INSERT INTO public.funct (a) VALUES (1); INSERT INTO public.funct (a) VALUES (2); INSERT INTO public.funct (a) VALUES (3); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM public.funct; \c :provider_dsn BEGIN; COMMIT;--empty transaction SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM public.funct; -- test replication where the destination table has extra (nullable) columns that are not in the origin table \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.nullcheck_tbl( id integer PRIMARY KEY, id1 integer, name text ) ; $$); SELECT * FROM pglogical.replication_set_add_table('default', 'nullcheck_tbl'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); INSERT INTO public.nullcheck_tbl(id,id1,name) VALUES (1,1,'name1'); INSERT INTO public.nullcheck_tbl(id,id1,name) VALUES (2,2,'name2'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM public.nullcheck_tbl; ALTER TABLE public.nullcheck_tbl ADD COLUMN name1 text; SELECT * FROM public.nullcheck_tbl; \c :provider_dsn INSERT INTO public.nullcheck_tbl(id,id1,name) VALUES 
(3,3,'name3'); INSERT INTO public.nullcheck_tbl(id,id1,name) VALUES (4,4,'name4'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM public.nullcheck_tbl; \c :provider_dsn UPDATE public.nullcheck_tbl SET name='name31' where id = 3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn INSERT INTO public.nullcheck_tbl(id,id1,name) VALUES (6,6,'name6'); SELECT * FROM public.nullcheck_tbl; \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.not_nullcheck_tbl( id integer PRIMARY KEY, id1 integer, name text ) ; $$); SELECT * FROM pglogical.replication_set_add_table('default', 'not_nullcheck_tbl'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn ALTER TABLE public.not_nullcheck_tbl ADD COLUMN id2 integer not null; \c :provider_dsn SELECT quote_literal(pg_current_xlog_location()) as curr_lsn \gset INSERT INTO public.not_nullcheck_tbl(id,id1,name) VALUES (1,1,'name1'); INSERT INTO public.not_nullcheck_tbl(id,id1,name) VALUES (2,2,'name2'); SELECT pglogical.wait_slot_confirm_lsn(NULL, :curr_lsn); \c :subscriber_dsn SELECT * FROM public.not_nullcheck_tbl; INSERT INTO public.not_nullcheck_tbl(id,id1,name) VALUES (3,3,'name3'); SELECT * FROM public.not_nullcheck_tbl; SELECT pglogical.alter_subscription_disable('test_subscription', true); \c :provider_dsn DO $$ BEGIN FOR i IN 1..100 LOOP IF (SELECT count(1) FROM pg_replication_slots WHERE active = false) THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; SELECT data::json->'action' as action, CASE WHEN data::json->>'action' IN ('I', 'D', 'U') THEN data END as data FROM pg_logical_slot_get_changes((SELECT slot_name FROM pg_replication_slots), NULL, 1, 'min_proto_version', '1', 'max_proto_version', '1', 'startup_params_format', '1', 'proto_format', 'json', 'pglogical.replication_set_names', 'default'); SELECT data::json->'action' as action, CASE WHEN data::json->>'action' IN ('I', 'D', 'U') THEN data END as data FROM 
pg_logical_slot_get_changes((SELECT slot_name FROM pg_replication_slots), NULL, 1, 'min_proto_version', '1', 'max_proto_version', '1', 'startup_params_format', '1', 'proto_format', 'json', 'pglogical.replication_set_names', 'default'); \c :subscriber_dsn SELECT pglogical.alter_subscription_enable('test_subscription', true); ALTER TABLE public.not_nullcheck_tbl ALTER COLUMN id2 SET default 99; \c :provider_dsn DO $$ BEGIN FOR i IN 1..100 LOOP IF (SELECT count(1) FROM pg_replication_slots WHERE active = true) THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; INSERT INTO public.not_nullcheck_tbl(id,id1,name) VALUES (4,4,'name4'); -- id2 will be 99 on subscriber ALTER TABLE public.not_nullcheck_tbl ADD COLUMN id2 integer not null default 0; INSERT INTO public.not_nullcheck_tbl(id,id1,name) VALUES (5,5,'name5'); -- id2 will be 0 on both SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM public.not_nullcheck_tbl; \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE FUNCTION public.some_prime_numbers() RETURNS SETOF integer LANGUAGE sql IMMUTABLE STRICT LEAKPROOF AS $_$ VALUES (2), (3), (5), (7), (11), (13), (17), (19), (23), (29), (31), (37), (41), (43), (47), (53), (59), (61), (67), (71), (73), (79), (83), (89), (97) $_$; CREATE FUNCTION public.is_prime_lt_100(integer) RETURNS boolean LANGUAGE sql IMMUTABLE STRICT LEAKPROOF AS $_$ SELECT EXISTS (SELECT FROM public.some_prime_numbers() s(p) WHERE p = $1) $_$; CREATE DOMAIN public.prime AS integer CONSTRAINT prime_check CHECK(public.is_prime_lt_100(VALUE)); CREATE TABLE public.prime_tbl ( num public.prime NOT NULL, PRIMARY KEY(num) ); INSERT INTO public.prime_tbl (num) VALUES(17), (31), (79); $$); SELECT * FROM pglogical.replication_set_add_table('default', 'public.prime_tbl'); DELETE FROM public.prime_tbl WHERE num = 31; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT num FROM public.prime_tbl; \c :provider_dsn \set VERBOSITY terse 
SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.funct CASCADE; DROP SEQUENCE public.INSERT_SEQ; DROP TABLE public.funct2 CASCADE; DROP TABLE public.funct5 CASCADE; DROP FUNCTION public.get_curr_century(); DROP FUNCTION public.add(integer, integer); DROP TABLE public.nullcheck_tbl CASCADE; DROP TABLE public.not_nullcheck_tbl CASCADE; DROP TABLE public.prime_tbl CASCADE; DROP DOMAIN public.prime; DROP FUNCTION public.is_prime_lt_100(integer); DROP FUNCTION public.some_prime_numbers(); $$); pglogical-REL2_4_1/sql/huge_tx.sql000066400000000000000000000027151415142317000171520ustar00rootroot00000000000000-- test huge transactions SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn -- lots of small rows replication with DDL outside transaction SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.a_huge ( id integer primary key, id1 integer, data text default 'data', data1 text default 'data1' ); $$); SELECT * FROM pglogical.replication_set_add_table('default', 'a_huge'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); BEGIN; INSERT INTO public.a_huge VALUES (generate_series(1, 20000000), generate_series(1, 20000000)); COMMIT; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT count(*) FROM a_huge; \dtS+ a_huge; \c :provider_dsn -- lots of small rows replication with DDL within transaction BEGIN; SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.b_huge ( id integer primary key, id1 integer, data text default 'data', data1 text default 'data1' ); $$); SELECT * FROM pglogical.replication_set_add_table('default', 'b_huge'); INSERT INTO public.b_huge VALUES (generate_series(1,20000000), generate_series(1,20000000)); COMMIT; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT count(*) FROM b_huge; \dtS+ b_huge; \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.a_huge CASCADE; DROP TABLE public.b_huge CASCADE; $$); SELECT 
pglogical.wait_slot_confirm_lsn(NULL, NULL); pglogical-REL2_4_1/sql/huge_tx_100k_tables.sql000066400000000000000000000062101415142317000212310ustar00rootroot00000000000000-- test huge transactions -- Set 'max_locks_per_transaction' to 10000 to run test SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn -- medium number of rows in many different tables (100k): replication with DDL outside transaction create or replace function create_many_tables(int, int) returns void language plpgsql as $$ DECLARE i int; cr_command varchar; BEGIN FOR i IN $1 .. $2 LOOP cr_command := 'SELECT pglogical.replicate_ddl_command('' CREATE TABLE public.HUGE' || i || ' ( id integer primary key, id1 integer, data text default ''''data'''', data1 text default ''''data1'''' ); '')'; EXECUTE cr_command; END LOOP; END; $$; -- write multile version of this statement create or replace function add_many_tables_to_replication_set(int, int) returns void language plpgsql as $$ DECLARE i int; cr_command varchar; BEGIN FOR i IN $1 .. $2 LOOP cr_command := 'SELECT * FROM pglogical.replication_set_add_table( ''default'', ''HUGE' || i || ''' );'; EXECUTE cr_command; END LOOP; END; $$; create or replace function insert_into_many_tables(int, int) returns void language plpgsql as $$ DECLARE i int; cr_command varchar; BEGIN FOR i IN $1 .. $2 LOOP cr_command := 'INSERT INTO public.HUGE' || i || ' VALUES (generate_series(1, 200), generate_series(1, 200))'; EXECUTE cr_command; END LOOP; END; $$; create or replace function drop_many_tables(int, int) returns void language plpgsql as $$ DECLARE i int; cr_command varchar; BEGIN FOR i IN $1 .. 
$2 LOOP cr_command := 'SELECT pglogical.replicate_ddl_command('' DROP TABLE public.HUGE' || i ||' CASCADE; '')'; EXECUTE cr_command; END LOOP; END; $$; SELECT * FROM create_many_tables(1,100000); SELECT * FROM add_many_tables_to_replication_set(1,100000); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); BEGIN; SELECT * FROM insert_into_many_tables(1,100000); COMMIT; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT count(*) FROM public.HUGE2; \dtS+ public.HUGE2; \c :provider_dsn \set VERBOSITY terse SELECT * FROM drop_many_tables(1,100000); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); -- medium number of rows in many different tables: replication with DDL inside transaction BEGIN; SELECT * FROM create_many_tables(1,100000); SELECT * FROM add_many_tables_to_replication_set(1,100000); SELECT * FROM insert_into_many_tables(1,100000); COMMIT; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT count(*) FROM public.HUGE2; \dtS+ public.HUGE2; \c :provider_dsn \set VERBOSITY terse SELECT * FROM drop_many_tables(1,100000); DROP function create_many_tables(int, int); DROP function add_many_tables_to_replication_set(int,int); DROP function insert_into_many_tables(int, int); DROP function drop_many_tables(int, int); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); pglogical-REL2_4_1/sql/huge_tx_many_tables.sql000066400000000000000000000060571415142317000215330ustar00rootroot00000000000000-- test huge transactions SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn -- medium number of rows in many different tables: replication with DDL outside transaction create or replace function create_many_tables(int, int) returns void language plpgsql as $$ DECLARE i int; cr_command varchar; BEGIN FOR i IN $1 .. 
$2 LOOP cr_command := 'SELECT pglogical.replicate_ddl_command('' CREATE TABLE public.HUGE' || i || ' ( id integer primary key, id1 integer, data text default ''''data'''', data1 text default ''''data1'''' ); '')'; EXECUTE cr_command; END LOOP; END; $$; --write multiple version of this. create or replace function add_many_tables_to_replication_set(int, int) returns void language plpgsql as $$ DECLARE i int; cr_command varchar; BEGIN FOR i IN $1 .. $2 LOOP cr_command := 'SELECT * FROM pglogical.replication_set_add_table( ''default'', ''HUGE' || i || ''' );'; EXECUTE cr_command; END LOOP; END; $$; create or replace function insert_into_many_tables(int, int) returns void language plpgsql as $$ DECLARE i int; cr_command varchar; BEGIN FOR i IN $1 .. $2 LOOP cr_command := 'INSERT INTO public.HUGE' || i || ' VALUES (generate_series(1, 100000), generate_series(1, 100000))'; EXECUTE cr_command; END LOOP; END; $$; create or replace function drop_many_tables(int, int) returns void language plpgsql as $$ DECLARE i int; cr_command varchar; BEGIN FOR i IN $1 .. 
$2 LOOP cr_command := 'SELECT pglogical.replicate_ddl_command('' DROP TABLE public.HUGE' || i ||' CASCADE; '')'; EXECUTE cr_command; END LOOP; END; $$; SELECT * FROM create_many_tables(1,200); SELECT * FROM add_many_tables_to_replication_set(1,200); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); BEGIN; SELECT * FROM insert_into_many_tables(1,200); COMMIT; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT count(*) FROM public.HUGE2; \dtS+ public.HUGE2; \c :provider_dsn \set VERBOSITY terse SELECT * FROM drop_many_tables(1,200); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); -- medium number of rows in many different tables: replication with DDL inside transaction BEGIN; SELECT * FROM create_many_tables(1,200); SELECT * FROM add_many_tables_to_replication_set(1,200); SELECT * FROM insert_into_many_tables(1,200); COMMIT; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT count(*) FROM public.HUGE2; \dtS+ public.HUGE2; \c :provider_dsn \set VERBOSITY terse SELECT * FROM drop_many_tables(1,200); DROP function create_many_tables(int, int); DROP function add_many_tables_to_replication_set(int, int); DROP function insert_into_many_tables(int, int); DROP function drop_many_tables(int, int); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); pglogical-REL2_4_1/sql/infofuncs.sql000066400000000000000000000015261415142317000175000ustar00rootroot00000000000000DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') = 904 THEN CREATE EXTENSION IF NOT EXISTS pglogical_origin; END IF; END;$$; CREATE EXTENSION pglogical; SELECT pglogical.pglogical_max_proto_version(); SELECT pglogical.pglogical_min_proto_version(); -- test extension version SELECT pglogical.pglogical_version() = extversion FROM pg_extension WHERE extname = 'pglogical'; DROP EXTENSION pglogical; -- test upgrades DO $$ BEGIN IF version() ~ 'Postgres-XL' THEN CREATE EXTENSION IF NOT EXISTS pglogical; ELSE CREATE 
EXTENSION IF NOT EXISTS pglogical VERSION '1.0.0'; END IF; END; $$; ALTER EXTENSION pglogical UPDATE; SELECT pglogical.pglogical_version() = extversion FROM pg_extension WHERE extname = 'pglogical'; DROP EXTENSION pglogical; pglogical-REL2_4_1/sql/init.sql000066400000000000000000000062041415142317000164470ustar00rootroot00000000000000-- This should be done with pg_regress's --create-role option -- but it's blocked by bug 37906 SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SET client_min_messages = 'warning'; DROP USER IF EXISTS nonsuper; DROP USER IF EXISTS super; CREATE USER nonsuper WITH replication; CREATE USER super SUPERUSER; \c :subscriber_dsn SET client_min_messages = 'warning'; DROP USER IF EXISTS nonsuper; DROP USER IF EXISTS super; CREATE USER nonsuper WITH replication; CREATE USER super SUPERUSER; -- Can't because of bug 37906 --GRANT ALL ON DATABASE regress TO nonsuper; --GRANT ALL ON DATABASE regress TO nonsuper; \c :provider_dsn GRANT ALL ON SCHEMA public TO nonsuper; DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') >= 1000 THEN CREATE OR REPLACE FUNCTION public.pg_current_xlog_location() RETURNS pg_lsn LANGUAGE SQL AS 'SELECT pg_current_wal_lsn()'; ALTER FUNCTION public.pg_current_xlog_location() OWNER TO super; END IF; END; $$; \c :subscriber_dsn GRANT ALL ON SCHEMA public TO nonsuper; SELECT E'\'' || current_database() || E'\'' AS subdb; \gset \c :provider_dsn SET client_min_messages = 'warning'; DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') = 904 THEN CREATE EXTENSION IF NOT EXISTS pglogical_origin; END IF; END;$$; DO $$ BEGIN IF version() ~ 'Postgres-XL' THEN CREATE EXTENSION IF NOT EXISTS pglogical; ELSE CREATE EXTENSION IF NOT EXISTS pglogical VERSION '1.0.0'; END IF; END; $$; ALTER EXTENSION pglogical UPDATE; \dx pglogical SELECT * FROM pglogical.create_node(node_name := 'test_provider', dsn := (SELECT provider_dsn FROM 
pglogical_regress_variables()) || ' user=super'); \c :subscriber_dsn SET client_min_messages = 'warning'; DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') = 904 THEN CREATE EXTENSION IF NOT EXISTS pglogical_origin; END IF; END;$$; CREATE EXTENSION IF NOT EXISTS pglogical; SELECT * FROM pglogical.create_node(node_name := 'test_subscriber', dsn := (SELECT subscriber_dsn FROM pglogical_regress_variables()) || ' user=super'); BEGIN; SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription', provider_dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=super', synchronize_structure := true, forward_origins := '{}'); /* * Remove the function we added in preseed because otherwise the restore of * schema will fail. We do this in same transaction as create_subscription() * because the subscription process will only start on commit. */ DROP FUNCTION IF EXISTS public.pglogical_regress_variables(); COMMIT; BEGIN; SET LOCAL statement_timeout = '30s'; SELECT pglogical.wait_for_subscription_sync_complete('test_subscription'); COMMIT; SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status IN ('y', 'r') FROM pglogical.local_sync_status ORDER BY 2,3,4; -- Make sure we see the slot and active connection \c :provider_dsn SELECT plugin, slot_type, active FROM pg_replication_slots; SELECT count(*) FROM pg_stat_replication; pglogical-REL2_4_1/sql/init_fail.sql000066400000000000000000000057351415142317000174520ustar00rootroot00000000000000 SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SET client_min_messages = 'warning'; DROP ROLE IF EXISTS nonreplica; CREATE USER nonreplica; DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') = 904 THEN CREATE EXTENSION IF NOT EXISTS pglogical_origin; END IF; END;$$; CREATE EXTENSION IF NOT EXISTS pglogical; GRANT ALL ON SCHEMA pglogical TO nonreplica; GRANT ALL ON ALL TABLES IN 
SCHEMA pglogical TO nonreplica; \c :subscriber_dsn SET client_min_messages = 'warning'; \set VERBOSITY terse DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') = 904 THEN CREATE EXTENSION IF NOT EXISTS pglogical_origin; END IF; END;$$; DO $$ BEGIN IF version() ~ 'Postgres-XL' THEN CREATE EXTENSION IF NOT EXISTS pglogical; ELSE CREATE EXTENSION IF NOT EXISTS pglogical VERSION '1.0.0'; END IF; END; $$; ALTER EXTENSION pglogical UPDATE; -- fail (local node not existing) SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription', provider_dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=nonreplica', forward_origins := '{}'); -- succeed SELECT * FROM pglogical.create_node(node_name := 'test_subscriber', dsn := (SELECT subscriber_dsn FROM pglogical_regress_variables()) || ' user=nonreplica'); -- fail (can't connect to remote) DO $$ BEGIN SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription', provider_dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=nonexisting', forward_origins := '{}'); EXCEPTION WHEN OTHERS THEN RAISE EXCEPTION '%:%', split_part(SQLERRM, ':', 1), (regexp_matches(SQLERRM, '^.*( FATAL:.*role.*)$'))[1]; END; $$; -- fail (remote node not existing) SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription', provider_dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=nonreplica', forward_origins := '{}'); \c :provider_dsn -- succeed SELECT * FROM pglogical.create_node(node_name := 'test_provider', dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=nonreplica'); \c :subscriber_dsn \set VERBOSITY terse -- fail (can't connect with replication connection to remote) DO $$ BEGIN SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription', provider_dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' 
user=nonreplica', forward_origins := '{}'); EXCEPTION WHEN OTHERS THEN RAISE EXCEPTION '%', split_part(SQLERRM, ':', 1); END; $$; -- cleanup SELECT * FROM pglogical.drop_node('test_subscriber'); DROP EXTENSION pglogical; \c :provider_dsn SELECT * FROM pglogical.drop_node('test_provider'); SET client_min_messages = 'warning'; DROP OWNED BY nonreplica; DROP ROLE IF EXISTS nonreplica; DROP EXTENSION pglogical; pglogical-REL2_4_1/sql/interfaces.sql000066400000000000000000000030321415142317000176230ustar00rootroot00000000000000 SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn CREATE USER super2 SUPERUSER; \c :subscriber_dsn SELECT * FROM pglogical.alter_node_add_interface('test_provider', 'super2', (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=super2'); SELECT * FROM pglogical.alter_subscription_interface('test_subscription', 'super2'); DO $$ BEGIN FOR i IN 1..100 LOOP IF EXISTS (SELECT 1 FROM pglogical.show_subscription_status() WHERE status != 'down') THEN EXIT; END IF; PERFORM pg_sleep(0.1); END LOOP; END;$$; SELECT pg_sleep(0.1); SELECT subscription_name, status, provider_node, replication_sets, forward_origins FROM pglogical.show_subscription_status(); \c :provider_dsn SELECT plugin, slot_type, active FROM pg_replication_slots; SELECT usename FROM pg_stat_replication WHERE application_name = 'test_subscription'; \c :subscriber_dsn SELECT * FROM pglogical.alter_subscription_interface('test_subscription', 'test_provider'); DO $$ BEGIN FOR i IN 1..100 LOOP IF EXISTS (SELECT 1 FROM pglogical.show_subscription_status() WHERE status != 'down') THEN EXIT; END IF; PERFORM pg_sleep(0.1); END LOOP; END;$$; SELECT pg_sleep(0.1); SELECT subscription_name, status, provider_node, replication_sets, forward_origins FROM pglogical.show_subscription_status(); \c :provider_dsn DROP USER super2; SELECT plugin, slot_type, active FROM pg_replication_slots; SELECT usename FROM pg_stat_replication WHERE application_name = 'test_subscription'; 
pglogical-REL2_4_1/sql/matview.sql000066400000000000000000000032001415142317000171510ustar00rootroot00000000000000/* First test whether a table's replication set can be properly manipulated */ SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.test_tbl(id serial primary key, data text); CREATE MATERIALIZED VIEW public.test_mv AS (SELECT * FROM public.test_tbl); $$); SELECT * FROM pglogical.replication_set_add_all_tables('default', '{public}'); INSERT INTO test_tbl VALUES (1, 'a'); REFRESH MATERIALIZED VIEW test_mv; INSERT INTO test_tbl VALUES (2, 'b'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); SELECT * FROM test_tbl ORDER BY id; SELECT * FROM test_mv ORDER BY id; \c :subscriber_dsn SELECT * FROM test_tbl ORDER BY id; SELECT * FROM test_mv ORDER BY id; \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE UNIQUE INDEX ON public.test_mv(id); $$); INSERT INTO test_tbl VALUES (3, 'c'); REFRESH MATERIALIZED VIEW CONCURRENTLY test_mv; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); INSERT INTO test_tbl VALUES (4, 'd'); SELECT pglogical.replicate_ddl_command($$ REFRESH MATERIALIZED VIEW public.test_mv; $$); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); INSERT INTO test_tbl VALUES (5, 'e'); SELECT pglogical.replicate_ddl_command($$ REFRESH MATERIALIZED VIEW CONCURRENTLY public.test_mv; $$); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); SELECT * FROM test_tbl ORDER BY id; SELECT * FROM test_mv ORDER BY id; \c :subscriber_dsn SELECT * FROM test_tbl ORDER BY id; SELECT * FROM test_mv ORDER BY id; \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.test_tbl CASCADE; $$); pglogical-REL2_4_1/sql/multiple_upstreams.sql000066400000000000000000000061241415142317000214430ustar00rootroot00000000000000SELECT * FROM pglogical_regress_variables() \gset \c :subscriber_dsn GRANT ALL ON SCHEMA public TO nonsuper; SELECT E'\'' || 
current_database() || E'\'' AS subdb; \gset \c :provider1_dsn SET client_min_messages = 'warning'; GRANT ALL ON SCHEMA public TO nonsuper; SET client_min_messages = 'warning'; DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') = 904 THEN CREATE EXTENSION IF NOT EXISTS pglogical_origin; END IF; END;$$; CREATE EXTENSION IF NOT EXISTS pglogical; SELECT * FROM pglogical.create_node(node_name := 'test_provider1', dsn := (SELECT provider1_dsn FROM pglogical_regress_variables()) || ' user=super'); \c :provider_dsn -- add these entries to provider SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.multi_ups_tbl(id integer primary key, key text unique not null, data text); $$); INSERT INTO multi_ups_tbl VALUES(1, 'key1', 'data1'); INSERT INTO multi_ups_tbl VALUES(2, 'key2', 'data2'); INSERT INTO multi_ups_tbl VALUES(3, 'key3', 'data3'); SELECT * FROM pglogical.replication_set_add_table('default', 'multi_ups_tbl', true); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :provider1_dsn -- add these entries to provider1 CREATE TABLE multi_ups_tbl(id integer primary key, key text unique not null, data text); INSERT INTO multi_ups_tbl VALUES(4, 'key4', 'data4'); INSERT INTO multi_ups_tbl VALUES(5, 'key5', 'data5'); INSERT INTO multi_ups_tbl VALUES(6, 'key6', 'data6'); SELECT * FROM pglogical.replication_set_add_table('default', 'multi_ups_tbl'); \c :subscriber_dsn -- We'll use the already existing pglogical node -- notice synchronize_structure as false when table definition already exists SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription1', provider_dsn := (SELECT provider1_dsn FROM pglogical_regress_variables()) || ' user=super', synchronize_structure := false, forward_origins := '{}'); BEGIN; SET LOCAL statement_timeout = '10s'; SELECT pglogical.wait_for_subscription_sync_complete('test_subscription1'); COMMIT; SELECT subscription_name, status, provider_node, replication_sets, 
forward_origins FROM pglogical.show_subscription_status(); SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status IN ('y', 'r') FROM pglogical.local_sync_status ORDER BY 2,3,4; SELECT * from multi_ups_tbl ORDER BY id; -- Make sure we see the slot and active connection \c :provider1_dsn SELECT plugin, slot_type, active FROM pg_replication_slots; SELECT count(*) FROM pg_stat_replication; -- cleanup \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.multi_ups_tbl CASCADE; $$); \c :provider1_dsn SELECT * FROM pglogical.drop_node(node_name := 'test_provider1'); \set VERBOSITY terse DROP TABLE public.multi_ups_tbl CASCADE; \c :subscriber_dsn SELECT * FROM pglogical.drop_subscription('test_subscription1'); \c :provider1_dsn SELECT * FROM pglogical.drop_node(node_name := 'test_provider1'); SELECT plugin, slot_type, active FROM pg_replication_slots; SELECT count(*) FROM pg_stat_replication; pglogical-REL2_4_1/sql/node_origin_cascade.sql000066400000000000000000000074141415142317000214470ustar00rootroot00000000000000SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT E'\'' || current_database() || E'\'' AS pubdb; \gset \c :orig_provider_dsn SET client_min_messages = 'warning'; GRANT ALL ON SCHEMA public TO nonsuper; SET client_min_messages = 'warning'; DO $$ BEGIN IF (SELECT setting::integer/100 FROM pg_settings WHERE name = 'server_version_num') = 904 THEN CREATE EXTENSION IF NOT EXISTS pglogical_origin; END IF; END;$$; DO $$ BEGIN IF version() ~ 'Postgres-XL' THEN CREATE EXTENSION IF NOT EXISTS pglogical; ELSE CREATE EXTENSION IF NOT EXISTS pglogical VERSION '1.0.0'; END IF; END; $$; ALTER EXTENSION pglogical UPDATE; SELECT * FROM pglogical.create_node(node_name := 'test_orig_provider', dsn := (SELECT orig_provider_dsn FROM pglogical_regress_variables()) || ' user=super'); \c :provider_dsn SET client_min_messages = 'warning'; -- test_provider pglogical node already exists here. 
BEGIN; SELECT * FROM pglogical.create_subscription( subscription_name := 'test_orig_subscription', provider_dsn := (SELECT orig_provider_dsn FROM pglogical_regress_variables()) || ' user=super', synchronize_structure := false, forward_origins := '{}'); COMMIT; BEGIN; SET LOCAL statement_timeout = '10s'; SELECT pglogical.wait_for_subscription_sync_complete('test_orig_subscription'); COMMIT; SELECT subscription_name, status, provider_node, replication_sets, forward_origins FROM pglogical.show_subscription_status(); SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status IN ('y', 'r') FROM pglogical.local_sync_status ORDER BY 2,3,4; -- Make sure we see the slot and active connection \c :orig_provider_dsn SELECT plugin, slot_type, active FROM pg_replication_slots; SELECT count(*) FROM pg_stat_replication; -- Table that replicates from top level provider to mid-level pglogical node. \c :orig_provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.top_level_tbl ( id serial primary key, other integer, data text, something interval ); $$); SELECT * FROM pglogical.replication_set_add_table('default', 'top_level_tbl'); INSERT INTO top_level_tbl(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :provider_dsn SELECT id, other, data, something FROM top_level_tbl ORDER BY id; -- Table that replicates from top level provider to mid-level pglogical node. 
SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.mid_level_tbl ( id serial primary key, other integer, data text, something interval ); $$); SELECT * FROM pglogical.replication_set_add_table('default', 'mid_level_tbl'); INSERT INTO mid_level_tbl(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, something FROM mid_level_tbl ORDER BY id; -- drop the tables \c :orig_provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.top_level_tbl CASCADE; $$); \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.mid_level_tbl CASCADE; $$); \c :provider_dsn SELECT * FROM pglogical.drop_subscription('test_orig_subscription'); \c :orig_provider_dsn SELECT * FROM pglogical.drop_node(node_name := 'test_orig_provider'); SELECT plugin, slot_type, active FROM pg_replication_slots; SELECT count(*) FROM pg_stat_replication; pglogical-REL2_4_1/sql/parallel.sql000066400000000000000000000053421415142317000173020ustar00rootroot00000000000000SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT * FROM pglogical.create_replication_set('parallel'); \c :subscriber_dsn SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription_parallel', provider_dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=super', replication_sets := '{parallel,default}', forward_origins := '{}', synchronize_structure := false, synchronize_data := false ); SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription_parallel', provider_dsn := (SELECT provider_dsn FROM pglogical_regress_variables()) || ' user=super', replication_sets := '{parallel}', forward_origins := '{}', synchronize_structure := 
false, synchronize_data := false ); BEGIN; SET LOCAL statement_timeout = '10s'; SELECT pglogical.wait_for_subscription_sync_complete('test_subscription_parallel'); COMMIT; SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status IN ('y', 'r') FROM pglogical.local_sync_status ORDER BY 2,3,4; SELECT * FROM pglogical.show_subscription_status(); -- Make sure we see the slot and active connection \c :provider_dsn SELECT plugin, slot_type, database, active FROM pg_replication_slots; SELECT count(*) FROM pg_stat_replication; SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.basic_dml1 ( id serial primary key, other integer, data text, something interval ); CREATE TABLE public.basic_dml2 ( id serial primary key, other integer, data text, something interval ); $$); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml1'); SELECT * FROM pglogical.replication_set_add_table('parallel', 'basic_dml2'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); WITH one AS ( INSERT INTO basic_dml1(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL) RETURNING * ) INSERT INTO basic_dml2 SELECT * FROM one; BEGIN; UPDATE basic_dml1 SET other = id, something = something - '10 seconds'::interval WHERE id < 3; DELETE FROM basic_dml2 WHERE id < 3; COMMIT; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); SELECT * FROM basic_dml1; SELECT * FROM basic_dml2; \c :subscriber_dsn SELECT * FROM basic_dml1; SELECT * FROM basic_dml2; SELECT pglogical.drop_subscription('test_subscription_parallel'); \c :provider_dsn \set VERBOSITY terse SELECT * FROM pglogical.drop_replication_set('parallel'); SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.basic_dml1 CASCADE; DROP TABLE public.basic_dml2 CASCADE; $$); 
pglogical-REL2_4_1/sql/preseed.sql000066400000000000000000000073051415142317000171360ustar00rootroot00000000000000-- Indirection for connection strings CREATE OR REPLACE FUNCTION public.pglogical_regress_variables( OUT orig_provider_dsn text, OUT provider_dsn text, OUT provider1_dsn text, OUT subscriber_dsn text ) RETURNS record LANGUAGE SQL AS $f$ SELECT current_setting('pglogical.orig_provider_dsn'), current_setting('pglogical.provider_dsn'), current_setting('pglogical.provider1_dsn'), current_setting('pglogical.subscriber_dsn') $f$; SELECT * FROM pglogical_regress_variables() \gset /* * Tests to ensure that objects/data that exists pre-clone is successfully * cloned. The results are checked, after the clone, in preseed_check.sql. */ \c :provider_dsn CREATE SEQUENCE some_local_seq; CREATE TABLE some_local_tbl(id serial primary key, key text unique not null, data text); INSERT INTO some_local_tbl(key, data) VALUES('key1', 'data1'); INSERT INTO some_local_tbl(key, data) VALUES('key2', NULL); INSERT INTO some_local_tbl(key, data) VALUES('key3', 'data3'); CREATE TABLE some_local_tbl1(id serial, key text unique not null, data text); INSERT INTO some_local_tbl1(key, data) VALUES('key1', 'data1'); INSERT INTO some_local_tbl1(key, data) VALUES('key2', NULL); INSERT INTO some_local_tbl1(key, data) VALUES('key3', 'data3'); CREATE TABLE some_local_tbl2(id serial, key text, data text); INSERT INTO some_local_tbl2(key, data) VALUES('key1', 'data1'); INSERT INTO some_local_tbl2(key, data) VALUES('key2', NULL); INSERT INTO some_local_tbl2(key, data) VALUES('key3', 'data3'); CREATE TABLE some_local_tbl3(id integer, key text, data text); INSERT INTO some_local_tbl3(key, data) VALUES('key1', 'data1'); INSERT INTO some_local_tbl3(key, data) VALUES('key2', NULL); INSERT INTO some_local_tbl3(key, data) VALUES('key3', 'data3'); /* * Make sure that the pglogical_regress_variables function exists both on * provider and subscriber since the original connection might have been * to 
completely different database. */ CREATE OR REPLACE FUNCTION public.pglogical_regress_variables( OUT orig_provider_dsn text, OUT provider_dsn text, OUT provider1_dsn text, OUT subscriber_dsn text ) RETURNS record LANGUAGE SQL AS $f$ SELECT current_setting('pglogical.orig_provider_dsn'), current_setting('pglogical.provider_dsn'), current_setting('pglogical.provider1_dsn'), current_setting('pglogical.subscriber_dsn') $f$; CREATE DATABASE regression1; CREATE DATABASE sourcedb; \c :orig_provider_dsn CREATE OR REPLACE FUNCTION public.pglogical_regress_variables( OUT orig_provider_dsn text, OUT provider_dsn text, OUT provider1_dsn text, OUT subscriber_dsn text ) RETURNS record LANGUAGE SQL AS $f$ SELECT current_setting('pglogical.orig_provider_dsn'), current_setting('pglogical.provider_dsn'), current_setting('pglogical.provider1_dsn'), current_setting('pglogical.subscriber_dsn') $f$; \c :provider1_dsn CREATE OR REPLACE FUNCTION public.pglogical_regress_variables( OUT orig_provider_dsn text, OUT provider_dsn text, OUT provider1_dsn text, OUT subscriber_dsn text ) RETURNS record LANGUAGE SQL AS $f$ SELECT current_setting('pglogical.orig_provider_dsn'), current_setting('pglogical.provider_dsn'), current_setting('pglogical.provider1_dsn'), current_setting('pglogical.subscriber_dsn') $f$; \c :subscriber_dsn CREATE OR REPLACE FUNCTION public.pglogical_regress_variables( OUT orig_provider_dsn text, OUT provider_dsn text, OUT provider1_dsn text, OUT subscriber_dsn text ) RETURNS record LANGUAGE SQL AS $f$ SELECT current_setting('pglogical.orig_provider_dsn'), current_setting('pglogical.provider_dsn'), current_setting('pglogical.provider1_dsn'), current_setting('pglogical.subscriber_dsn') $f$; pglogical-REL2_4_1/sql/preseed_check.sql000066400000000000000000000034701415142317000202720ustar00rootroot00000000000000-- Verify data from preseed.sql has correctly been cloned SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT attname, attnotnull, attisdropped from 
pg_attribute where attrelid = 'some_local_tbl'::regclass and attnum > 0 order by attnum; SELECT * FROM some_local_tbl ORDER BY id; SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'some_local_tbl1'::regclass and attnum > 0 order by attnum; SELECT * FROM some_local_tbl1 ORDER BY id; SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'some_local_tbl2'::regclass and attnum > 0 order by attnum; SELECT * FROM some_local_tbl2 ORDER BY id; SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'some_local_tbl3'::regclass and attnum > 0 order by attnum; SELECT * FROM some_local_tbl3 ORDER BY id; \c :subscriber_dsn SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'some_local_tbl'::regclass and attnum > 0 order by attnum; SELECT * FROM some_local_tbl ORDER BY id; SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'some_local_tbl1'::regclass and attnum > 0 order by attnum; SELECT * FROM some_local_tbl1 ORDER BY id; SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'some_local_tbl2'::regclass and attnum > 0 order by attnum; SELECT * FROM some_local_tbl2 ORDER BY id; SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'some_local_tbl3'::regclass and attnum > 0 order by attnum; SELECT * FROM some_local_tbl3 ORDER BY id; \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP SEQUENCE public.some_local_seq; DROP TABLE public.some_local_tbl; DROP TABLE public.some_local_tbl1; DROP TABLE public.some_local_tbl2; DROP TABLE public.some_local_tbl3; $$); pglogical-REL2_4_1/sql/primary_key.sql000066400000000000000000000313361415142317000200430ustar00rootroot00000000000000--PRIMARY KEY SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn -- testing update of primary key -- create table with primary key and 3 other tables referencing it SELECT 
pglogical.replicate_ddl_command($$ CREATE TABLE public.pk_users ( id integer PRIMARY KEY, another_id integer unique not null, a_id integer, name text, address text ); --pass $$); SELECT * FROM pglogical.replication_set_add_table('default', 'pk_users'); INSERT INTO pk_users VALUES(1,11,1,'User1', 'Address1'); INSERT INTO pk_users VALUES(2,12,1,'User2', 'Address2'); INSERT INTO pk_users VALUES(3,13,2,'User3', 'Address3'); INSERT INTO pk_users VALUES(4,14,2,'User4', 'Address4'); SELECT * FROM pk_users ORDER BY id; SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'pk_users'::regclass and attnum > 0 order by attnum; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM pk_users ORDER BY id; \c :provider_dsn UPDATE pk_users SET address='UpdatedAddress1' WHERE id=1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM pk_users ORDER BY id; -- Set up for secondary unique index and two-index -- conflict handling cases. 
INSERT INTO pk_users VALUES (5000,5000,0,'sub1',NULL); INSERT INTO pk_users VALUES (6000,6000,0,'sub2',NULL); \c :provider_dsn -- Resolve a conflict on the secondary unique constraint INSERT INTO pk_users VALUES (5001,5000,1,'pub1',NULL); -- And a conflict that violates two constraints INSERT INTO pk_users VALUES (6000,6000,1,'pub2',NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM pk_users WHERE id IN (5000,5001,6000) ORDER BY id; \c :provider_dsn DELETE FROM pk_users WHERE id IN (5000,5001,6000); \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ CREATE UNIQUE INDEX another_id_temp_idx ON public.pk_users (another_id); ALTER TABLE public.pk_users DROP CONSTRAINT pk_users_pkey, ADD CONSTRAINT pk_users_pkey PRIMARY KEY USING INDEX another_id_temp_idx; ALTER TABLE public.pk_users DROP CONSTRAINT pk_users_another_id_key; $$); SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'pk_users'::regclass and attnum > 0 order by attnum; UPDATE pk_users SET address='UpdatedAddress2' WHERE id=2; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'pk_users'::regclass and attnum > 0 order by attnum; SELECT * FROM pk_users ORDER BY id; \c :provider_dsn UPDATE pk_users SET address='UpdatedAddress3' WHERE another_id=12; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM pk_users ORDER BY id; \c :provider_dsn UPDATE pk_users SET address='UpdatedAddress4' WHERE a_id=2; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn INSERT INTO pk_users VALUES(4,15,2,'User5', 'Address5'); -- subscriber now has duplicated value in id field while provider does not SELECT * FROM pk_users ORDER BY id; \c :provider_dsn \set VERBOSITY terse SELECT quote_literal(pg_current_xlog_location()) as curr_lsn \gset SELECT pglogical.replicate_ddl_command($$ CREATE UNIQUE INDEX id_temp_idx 
ON public.pk_users (id); ALTER TABLE public.pk_users DROP CONSTRAINT pk_users_pkey, ADD CONSTRAINT pk_users_pkey PRIMARY KEY USING INDEX id_temp_idx; $$); SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'pk_users'::regclass and attnum > 0 order by attnum; SELECT pglogical.wait_slot_confirm_lsn(NULL, :curr_lsn); \c :subscriber_dsn SELECT attname, attnotnull, attisdropped from pg_attribute where attrelid = 'pk_users'::regclass and attnum > 0 order by attnum; SELECT pglogical.alter_subscription_disable('test_subscription', true); \c :provider_dsn -- Wait for subscription to disconnect. It will have been bouncing already -- due to apply worker restarts, but if it was retrying it'll stay down -- this time. DO $$ BEGIN FOR i IN 1..100 LOOP IF (SELECT count(1) FROM pg_replication_slots WHERE active = false) THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; SELECT data::json->'action' as action, CASE WHEN data::json->>'action' IN ('I', 'D', 'U') THEN json_extract_path(data::json, 'relation') END as data FROM pg_logical_slot_get_changes((SELECT slot_name FROM pg_replication_slots), NULL, 1, 'min_proto_version', '1', 'max_proto_version', '1', 'startup_params_format', '1', 'proto_format', 'json', 'pglogical.replication_set_names', 'default,ddl_sql'); SELECT data::json->'action' as action, CASE WHEN data::json->>'action' IN ('I', 'D', 'U') THEN data END as data FROM pg_logical_slot_get_changes((SELECT slot_name FROM pg_replication_slots), NULL, 1, 'min_proto_version', '1', 'max_proto_version', '1', 'startup_params_format', '1', 'proto_format', 'json', 'pglogical.replication_set_names', 'default,ddl_sql'); \c :subscriber_dsn SELECT pglogical.alter_subscription_enable('test_subscription', true); DELETE FROM pk_users WHERE id = 4;-- remove the offending entries. 
\c :provider_dsn DO $$ BEGIN FOR i IN 1..100 LOOP IF (SELECT count(1) FROM pg_replication_slots WHERE active = true) THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; $$; UPDATE pk_users SET address='UpdatedAddress2' WHERE id=2; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM pk_users ORDER BY id; \c :provider_dsn -- -- Test to show that we don't defend against alterations to tables -- that will break replication once added to a repset, or prevent -- dml that would break on apply. -- -- See 2ndQuadrant/pglogical_internal#146 -- -- Show that the current PK is not marked 'indisreplident' because we use -- REPLICA IDENTITY DEFAULT SELECT indisreplident FROM pg_index WHERE indexrelid = 'pk_users_pkey'::regclass; SELECT relreplident FROM pg_class WHERE oid = 'pk_users'::regclass; SELECT pglogical.replicate_ddl_command($$ ALTER TABLE public.pk_users DROP CONSTRAINT pk_users_pkey; $$); INSERT INTO pk_users VALUES(90,0,0,'User90', 'Address90'); -- pglogical will stop us adding the table to a repset if we try to, -- but didn't stop us altering it, and won't stop us updating it... BEGIN; SELECT * FROM pglogical.replication_set_remove_table('default', 'pk_users'); SELECT * FROM pglogical.replication_set_add_table('default', 'pk_users'); ROLLBACK; -- Per 2ndQuadrant/pglogical_internal#146 this shouldn't be allowed, but -- currently is. Logical decoding will fail to capture this change and we -- won't progress with decoding. -- -- This will get recorded by logical decoding with no 'oldkey' values, -- causing pglogical to fail to apply it with an error like -- -- CONFLICT: remote UPDATE on relation public.pk_users (tuple not found). Resolution: skip. -- UPDATE pk_users SET id = 91 WHERE id = 90; -- Catchup will replay the insert and succeed, but the update -- will be lost. 
BEGIN; SET LOCAL statement_timeout = '2s'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); ROLLBACK; -- To carry on we'll need to make the index on the downstream -- (which is odd, because logical decoding didn't capture the -- oldkey of the tuple, so how can we apply it?) \c :subscriber_dsn ALTER TABLE public.pk_users ADD CONSTRAINT pk_users_pkey PRIMARY KEY (id) NOT DEFERRABLE; \c :provider_dsn ALTER TABLE public.pk_users ADD CONSTRAINT pk_users_pkey PRIMARY KEY (id) NOT DEFERRABLE; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); -- Demonstrate that deferrable indexes aren't yet supported for updates on downstream -- and will fail with an informative error. SELECT pglogical.replicate_ddl_command($$ ALTER TABLE public.pk_users DROP CONSTRAINT pk_users_pkey, ADD CONSTRAINT pk_users_pkey PRIMARY KEY (id) DEFERRABLE INITIALLY DEFERRED; $$); -- Not allowed, deferrable ALTER TABLE public.pk_users REPLICA IDENTITY USING INDEX pk_users_pkey; -- New index isn't REPLICA IDENTITY either SELECT indisreplident FROM pg_index WHERE indexrelid = 'pk_users_pkey'::regclass; -- pglogical won't let us add the table to a repset, though -- it doesn't stop us altering it; see 2ndQuadrant/pglogical_internal#146 BEGIN; SELECT * FROM pglogical.replication_set_remove_table('default', 'pk_users'); SELECT * FROM pglogical.replication_set_add_table('default', 'pk_users'); ROLLBACK; -- We can still INSERT (which is fine) INSERT INTO pk_users VALUES(100,0,0,'User100', 'Address100'); -- FIXME pglogical shouldn't allow this, no valid replica identity exists -- see 2ndQuadrant/pglogical_internal#146 UPDATE pk_users SET id = 101 WHERE id = 100; -- Must time out, apply will fail on downstream due to no replident index BEGIN; SET LOCAL statement_timeout = '2s'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); ROLLBACK; \c :subscriber_dsn -- entry 100 must be absent since we can't apply it without -- a suitable pk SELECT id FROM pk_users WHERE id IN (90, 91, 100, 101) ORDER BY id; -- we can 
recover by re-creating the pk as non-deferrable ALTER TABLE public.pk_users DROP CONSTRAINT pk_users_pkey, ADD CONSTRAINT pk_users_pkey PRIMARY KEY (id) NOT DEFERRABLE; -- then replay. Toggle the subscription's enabled state -- to make it recover faster for a quicker test run. SELECT pglogical.alter_subscription_disable('test_subscription', true); SELECT pglogical.alter_subscription_enable('test_subscription', true); \c :provider_dsn SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id FROM pk_users WHERE id IN (90, 91, 100, 101) ORDER BY id; \c :provider_dsn -- Subscriber and provider have diverged due to inability to replicate -- the UPDATEs SELECT id FROM pk_users WHERE id IN (90, 91, 100, 101) ORDER BY id; -- Demonstrate that we properly handle wide conflict rows \c :subscriber_dsn INSERT INTO pk_users (id, another_id, address) VALUES (200,2000,repeat('waah daah sooo mooo', 1000)); \c :provider_dsn INSERT INTO pk_users (id, another_id, address) VALUES (200,2000,repeat('boop boop doop boop', 1000)); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, another_id, left(address,30) AS address_abbrev FROM pk_users WHERE another_id = 2000; -- DELETE conflicts; the DELETE is discarded \c :subscriber_dsn DELETE FROM pk_users WHERE id = 1; \c :provider_dsn DELETE FROM pk_users WHERE id = 1; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); -- UPDATE conflicts violating multiple constraints. -- For this one we need to put the secondary unique -- constraint back. 
TRUNCATE TABLE pk_users; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); SELECT pglogical.replicate_ddl_command($$ CREATE UNIQUE INDEX pk_users_another_id_idx ON public.pk_users(another_id); $$); \c :subscriber_dsn INSERT INTO pk_users VALUES (1,10,0,'sub',NULL), (2,20,0,'sub',NULL); \c :provider_dsn INSERT INTO pk_users VALUES (3,11,1,'pub',NULL), (4,22,1,'pub',NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM pk_users ORDER BY id; \c :provider_dsn -- UPDATE one of our upstream tuples to violate both constraints on the -- downstream. The constraints are independent but there's only one existing -- downstream tuple that violates both constraints. We'll match it by replica -- identity, replace it, and satisfy the other constraint in the process. UPDATE pk_users SET id=1, another_id = 10, name='should_error' WHERE id = 3 AND another_id = 11; SELECT * FROM pk_users ORDER BY id; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); -- UPDATEs to missing rows could either resurrect the row or conclude it -- shouldn't exist and discard it. Currently pgl unconditionally discards, so -- this row's name is a misnomer. \c :subscriber_dsn DELETE FROM pk_users WHERE id = 4 AND another_id = 22; \c :provider_dsn UPDATE pk_users SET name = 'jesus' WHERE id = 4; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn -- No resurrection here SELECT * FROM pk_users ORDER BY id; -- But if the UPDATE would create a row that violates -- a secondary unique index (but doesn't match the replident) -- we'll ERROR on the secondary index. 
INSERT INTO pk_users VALUES (5,55,0,'sub',NULL); \c :provider_dsn INSERT INTO pk_users VALUES (6,66,0,'sub',NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); -- The new row (6,55) will conflict with (5,55) UPDATE pk_users SET another_id = 55, name = 'pub_should_error' WHERE id = 6; SELECT * FROM pk_users ORDER BY id; -- We'll time out due to apply errors BEGIN; SET LOCAL statement_timeout = '2s'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); ROLLBACK; -- This time we'll fix it by deleting the conflicting row \c :subscriber_dsn SELECT * FROM pk_users ORDER BY id; DELETE FROM pk_users WHERE id = 5; \c :provider_dsn SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM pk_users ORDER BY id; \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.pk_users CASCADE; $$); pglogical-REL2_4_1/sql/replication_set.sql000066400000000000000000000104701415142317000206700ustar00rootroot00000000000000/* First test whether a table's replication set can be properly manipulated */ SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE SCHEMA normalschema; CREATE SCHEMA "strange.schema-IS"; CREATE TABLE public.test_publicschema(id serial primary key, data text); CREATE TABLE normalschema.test_normalschema(id serial primary key); CREATE TABLE "strange.schema-IS".test_strangeschema(id serial primary key); CREATE TABLE public.test_nopkey(id int); CREATE UNLOGGED TABLE public.test_unlogged(id int primary key); $$); SELECT nspname, relname, set_name FROM pglogical.tables WHERE relname IN ('test_publicschema', 'test_normalschema', 'test_strangeschema', 'test_nopkey') ORDER BY 1,2,3; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); -- show initial replication sets SELECT nspname, relname, set_name FROM pglogical.tables WHERE relname IN ('test_publicschema', 'test_normalschema', 'test_strangeschema', 'test_nopkey') ORDER BY 1,2,3; -- not 
existing replication set SELECT * FROM pglogical.replication_set_add_table('nonexisting', 'test_publicschema'); -- create some replication sets SELECT * FROM pglogical.create_replication_set('repset_replicate_all'); SELECT * FROM pglogical.create_replication_set('repset_replicate_instrunc', replicate_update := false, replicate_delete := false); SELECT * FROM pglogical.create_replication_set('repset_replicate_insupd', replicate_delete := false, replicate_truncate := false); -- add tables SELECT * FROM pglogical.replication_set_add_table('repset_replicate_all', 'test_publicschema'); SELECT * FROM pglogical.replication_set_add_table('repset_replicate_instrunc', 'normalschema.test_normalschema'); SELECT * FROM pglogical.replication_set_add_table('repset_replicate_insupd', 'normalschema.test_normalschema'); SELECT * FROM pglogical.replication_set_add_table('repset_replicate_insupd', '"strange.schema-IS".test_strangeschema'); -- should fail SELECT * FROM pglogical.replication_set_add_table('repset_replicate_all', 'test_unlogged'); SELECT * FROM pglogical.replication_set_add_table('repset_replicate_all', 'test_nopkey'); -- success SELECT * FROM pglogical.replication_set_add_table('repset_replicate_instrunc', 'test_nopkey'); SELECT * FROM pglogical.alter_replication_set('repset_replicate_insupd', replicate_truncate := true); -- fail again SELECT * FROM pglogical.replication_set_add_table('repset_replicate_insupd', 'test_nopkey'); SELECT * FROM pglogical.replication_set_add_all_tables('default', '{public}'); SELECT * FROM pglogical.alter_replication_set('repset_replicate_instrunc', replicate_update := true); SELECT * FROM pglogical.alter_replication_set('repset_replicate_instrunc', replicate_delete := true); -- Adding already-added fails \set VERBOSITY terse SELECT * FROM pglogical.replication_set_add_table('repset_replicate_all', 'public.test_publicschema'); \set VERBOSITY default -- check the replication sets SELECT nspname, relname, set_name FROM pglogical.tables WHERE 
relname IN ('test_publicschema', 'test_normalschema', 'test_strangeschema', 'test_nopkey') ORDER BY 1,2,3; SELECT * FROM pglogical.replication_set_add_all_tables('default_insert_only', '{public}'); SELECT nspname, relname, set_name FROM pglogical.tables WHERE relname IN ('test_publicschema', 'test_normalschema', 'test_strangeschema', 'test_nopkey') ORDER BY 1,2,3; --too short SELECT pglogical.create_replication_set(''); -- Can't drop table while it's in a repset DROP TABLE public.test_publicschema; -- Can't drop table while it's in a repset BEGIN; SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.test_publicschema; $$); ROLLBACK; -- Can CASCADE though, even outside ddlrep BEGIN; DROP TABLE public.test_publicschema CASCADE; ROLLBACK; -- ... and can drop after repset removal SELECT pglogical.replication_set_remove_table('repset_replicate_all', 'public.test_publicschema'); SELECT pglogical.replication_set_remove_table('default_insert_only', 'public.test_publicschema'); BEGIN; DROP TABLE public.test_publicschema; ROLLBACK; \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.test_publicschema CASCADE; DROP SCHEMA normalschema CASCADE; DROP SCHEMA "strange.schema-IS" CASCADE; DROP TABLE public.test_nopkey CASCADE; DROP TABLE public.test_unlogged CASCADE; $$); \c :subscriber_dsn SELECT * FROM pglogical.replication_set; pglogical-REL2_4_1/sql/row_filter.sql000066400000000000000000000330401415142317000176560ustar00rootroot00000000000000-- row based filtering SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.basic_dml ( id serial primary key, other integer, data text, "SomeThing" interval, insert_xid bigint DEFAULT txid_current() ); $$); -- used to check if initial copy does row filtering \COPY basic_dml(id, other, data, "SomeThing") FROM STDIN WITH CSV 5000,1,aaa,1 hour 5001,2,bbb,2 years 5002,3,ccc,3 minutes 5003,4,ddd,4 days \. 
-- create some functions: CREATE FUNCTION funcn_add(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; create function funcn_nochange(text) returns text as 'select $1 limit 1' language sql stable; create or replace function funcn_get_curr_decade() returns integer as $$ (SELECT EXTRACT(DECADE FROM NOW()):: integer); $$ language sql volatile; -- we allow volatile functions, it's user's responsibility to not do writes SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := 'current_user = data'); SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); -- fail -- subselect SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := '(SELECT count(*) FROM pg_class) > 1'); -- fail -- SELECT SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := 'SELECT true'); -- fail -- nonexisting column SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := 'foobar'); -- fail -- not coercable to bool SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := 'data'); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$id between 2 AND 4$rf$); SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := NULL); SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$id > funcn_add(1,2) $rf$); SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$data = funcn_nochange('baz') $rf$); SELECT * FROM 
pglogical.replication_set_remove_table('default', 'basic_dml'); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ other > funcn_get_curr_decade() $rf$); SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); -- use this filter for rest of the test SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', true, row_filter := $rf$id > 1 AND data IS DISTINCT FROM 'baz' AND data IS DISTINCT FROM 'bbb'$rf$); SELECT nspname, relname, set_name FROM pglogical.tables WHERE relname = 'basic_dml'; -- fail, the membership in repset depends on data column \set VERBOSITY terse ALTER TABLE basic_dml DROP COLUMN data; \set VERBOSITY default SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn -- wait for the initial data to copy BEGIN; SET LOCAL statement_timeout = '10s'; SELECT pglogical.wait_for_subscription_sync_complete('test_subscription'); COMMIT; SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; ALTER TABLE public.basic_dml ADD COLUMN subonly integer; ALTER TABLE public.basic_dml ADD COLUMN subonly_def integer DEFAULT 99; ALTER TABLE public.basic_dml ADD COLUMN subonly_def_ts timestamptz DEFAULT current_timestamp; \c :provider_dsn TRUNCATE basic_dml; -- check basic insert replication INSERT INTO basic_dml(other, data, "SomeThing") VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; -- update one row \c :provider_dsn UPDATE basic_dml SET other = '4', data = NULL, "SomeThing" = '3 days'::interval WHERE id = 4; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; -- update multiple rows \c :provider_dsn UPDATE basic_dml SET 
other = id, data = data || id::text; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; \c :provider_dsn UPDATE basic_dml SET other = id, "SomeThing" = "SomeThing" - '10 seconds'::interval WHERE id < 3; UPDATE basic_dml SET other = id, "SomeThing" = "SomeThing" + '10 seconds'::interval WHERE id > 3; DELETE FROM basic_dml WHERE id = 3; INSERT INTO basic_dml VALUES (3, 99, 'bazbaz', '2 years 1 hour'::interval); INSERT INTO basic_dml VALUES (7, 100, 'bazbaz', '2 years 1 hour'::interval); UPDATE basic_dml SET data = 'baz' WHERE id in (3,7); -- This update would be filtered at subscriber SELECT id, other, data, "SomeThing" from basic_dml ORDER BY id; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, "SomeThing", subonly, subonly_def FROM basic_dml ORDER BY id; \c :provider_dsn UPDATE basic_dml SET data = 'bar' WHERE id = 3; -- This update would again start to be received at subscriber DELETE FROM basic_dml WHERE data = 'baz'; -- Delete reaches the subscriber for a filtered row INSERT INTO basic_dml VALUES (6, 100, 'baz', '2 years 1 hour'::interval); -- insert would be filtered SELECT id, other, data, "SomeThing" from basic_dml ORDER BY id; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, "SomeThing", subonly, subonly_def FROM basic_dml ORDER BY id; \c :provider_dsn UPDATE basic_dml SET data = 'bar' WHERE id = 6; UPDATE basic_dml SET data = 'abcd' WHERE id = 6; -- These updates would continue to be missed on subscriber -- as it does not have the primary key SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; -- transaction timestamp should be updated for each row (see #148) SELECT count(DISTINCT subonly_def_ts) = count(DISTINCT insert_xid) FROM basic_dml; -- delete multiple rows \c :provider_dsn DELETE FROM basic_dml 
WHERE id < 4; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; -- truncate \c :provider_dsn TRUNCATE basic_dml; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; -- copy \c :provider_dsn \COPY basic_dml(id, other, data, "SomeThing") FROM STDIN WITH CSV 9000,1,aaa,1 hour 9001,2,bbb,2 years 9002,3,ccc,3 minutes 9003,4,ddd,4 days \. SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT id, other, data, "SomeThing" FROM basic_dml ORDER BY id; \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.test_jsonb ( json_type text primary key, test_json jsonb ); $$); INSERT INTO test_jsonb VALUES ('scalar','"a scalar"'), ('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'), ('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}'); SELECT * FROM pglogical.replication_set_add_table('default', 'test_jsonb', true, row_filter := $rf$(test_json ->> 'field2') IS DISTINCT FROM 'val2' $rf$); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn DO $$ BEGIN FOR i IN 1..100 LOOP IF NOT EXISTS (SELECT 1 FROM pglogical.local_sync_status WHERE sync_status != 'r') THEN EXIT; END IF; PERFORM pg_sleep(0.1); END LOOP; END;$$; SELECT * FROM test_jsonb ORDER BY json_type; \c :provider_dsn -- Filter may refer to not-replicated columns SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, columns := ARRAY['id', 'data'], row_filter := $rf$other = 2$rf$); INSERT INTO basic_dml(other, data, "SomeThing") VALUES (2, 'itstwo', '1 second'::interval); SELECT other, data, "SomeThing" FROM basic_dml WHERE data = 'itstwo'; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn -- 
'other' will be NULL as it wasn't in the repset -- even though we filtered on it. So will SomeThing. SELECT other, data, "SomeThing" FROM basic_dml WHERE data = 'itstwo'; \c :provider_dsn --------------------------------------------------- -- Enhanced function tests covering basic plpgsql --------------------------------------------------- CREATE FUNCTION func_plpgsql_simple(arg integer) RETURNS integer LANGUAGE plpgsql AS $$ BEGIN RETURN arg; END; $$; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ func_plpgsql_simple(other) = 100 $rf$); -- Should FAIL due to dependency -- -- FIXME: Succeeds incorrectly (RM#5880) leading to -- cache lookup failed for function" errors in logs if allowed to commit -- BEGIN; DROP FUNCTION func_plpgsql_simple(integer); ROLLBACK; INSERT INTO basic_dml (other) VALUES (100), (101); SELECT other FROM basic_dml WHERE other IN (100,101); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT other FROM basic_dml WHERE other IN (100,101); \c :provider_dsn CREATE FUNCTION func_plpgsql_logic(arg integer) RETURNS integer LANGUAGE plpgsql AS $$ BEGIN IF arg = 200 THEN RETURN arg; ELSE RETURN 0; END IF; END; $$; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ func_plpgsql_logic(other) = other $rf$); INSERT INTO basic_dml (other) VALUES (200), (201); SELECT other FROM basic_dml WHERE other IN (200,201); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT other FROM basic_dml WHERE other IN (200,201); \c :provider_dsn CREATE FUNCTION func_plpgsql_security_definer(arg integer) RETURNS integer LANGUAGE plpgsql SECURITY DEFINER AS $$ BEGIN RAISE NOTICE 'c_u: %, s_u: %', current_user, session_user; RETURN arg; END; $$; CREATE ROLE temp_owner; ALTER 
FUNCTION func_plpgsql_security_definer(integer) OWNER TO temp_owner; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ func_plpgsql_security_definer(other) = 300 $rf$); INSERT INTO basic_dml (other) VALUES (300), (301); SELECT other FROM basic_dml WHERE other IN (300,301); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT other FROM basic_dml WHERE other IN (300,301); \c :provider_dsn CREATE FUNCTION func_plpgsql_exception(arg integer) RETURNS integer LANGUAGE plpgsql AS $$ BEGIN BEGIN SELECT arg/0; EXCEPTION WHEN division_by_zero THEN RETURN arg; END; RAISE EXCEPTION 'should be unreachable'; END; $$; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ func_plpgsql_exception(other) = 400 $rf$); INSERT INTO basic_dml (other) VALUES (400), (401); SELECT other FROM basic_dml WHERE other IN (400,401); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT other FROM basic_dml WHERE other IN (400,401); \c :provider_dsn -- Should not be able to use a SETOF or TABLE func directly -- but we can do it via a wrapper: CREATE FUNCTION func_plpgsql_srf_retq(arg integer) RETURNS TABLE (result integer, dummy boolean) LANGUAGE plpgsql SECURITY DEFINER AS $$ BEGIN RETURN QUERY SELECT arg * x, true FROM generate_series(1,2) x; RETURN; END; $$; -- fails with SRF context error BEGIN; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ (func_plpgsql_srf_retq(other)).result = 500 $rf$); ROLLBACK; CREATE FUNCTION func_plpgsql_call_set(arg integer) RETURNS boolean LANGUAGE plpgsql AS $$ BEGIN RETURN (SELECT true FROM func_plpgsql_srf_retq(arg) WHERE result = arg * 
2); END; $$; SELECT * FROM pglogical.replication_set_remove_table('default', 'basic_dml'); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml', false, row_filter := $rf$ func_plpgsql_call_set(other) $rf$); INSERT INTO basic_dml (other) VALUES (500), (501); SELECT other FROM basic_dml WHERE other IN (500,501); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT other FROM basic_dml WHERE other IN (500,501); \c :provider_dsn DROP FUNCTION func_plpgsql_simple(integer); DROP FUNCTION func_plpgsql_logic(integer); DROP FUNCTION func_plpgsql_security_definer(integer); DROP FUNCTION func_plpgsql_exception(integer); DROP FUNCTION func_plpgsql_srf_retq(integer); DROP FUNCTION func_plpgsql_call_set(integer); DROP ROLE temp_owner; --------------------------------------------------- -- ^^^ End plpgsql tests --------------------------------------------------- \c :provider_dsn \set VERBOSITY terse DROP FUNCTION funcn_add(integer, integer); DROP FUNCTION funcn_nochange(text); DROP FUNCTION funcn_get_curr_decade(); SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.basic_dml CASCADE; DROP TABLE public.test_jsonb CASCADE; $$); pglogical-REL2_4_1/sql/row_filter_sampling.sql000066400000000000000000000036751415142317000215630ustar00rootroot00000000000000-- row based filtering SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn -- testing volatile sampling function in row_filter SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.test_tablesample (id int primary key, name text) WITH (fillfactor=10); $$); -- use fillfactor so we don't have to load too much data to get multiple pages INSERT INTO test_tablesample SELECT i, repeat(i::text, 200) FROM generate_series(0, 9) s(i); create or replace function funcn_get_system_sample_count(integer, integer) returns bigint as $$ (SELECT count(*) FROM test_tablesample TABLESAMPLE SYSTEM ($1) REPEATABLE ($2)); $$ language sql volatile; create or replace function 
funcn_get_bernoulli_sample_count(integer, integer) returns bigint as $$ (SELECT count(*) FROM test_tablesample TABLESAMPLE BERNOULLI ($1) REPEATABLE ($2)); $$ language sql volatile; SELECT * FROM pglogical.replication_set_add_table('default', 'test_tablesample', false, row_filter := $rf$id > funcn_get_system_sample_count(100, 3) $rf$); SELECT * FROM pglogical.replication_set_remove_table('default', 'test_tablesample'); SELECT * FROM pglogical.replication_set_add_table('default', 'test_tablesample', true, row_filter := $rf$id > funcn_get_bernoulli_sample_count(10, 0) $rf$); SELECT * FROM test_tablesample ORDER BY id limit 5; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn BEGIN; SET LOCAL statement_timeout = '10s'; SELECT pglogical.wait_for_table_sync_complete('test_subscription', 'test_tablesample'); COMMIT; SELECT sync_kind, sync_nspname, sync_relname, sync_status FROM pglogical.local_sync_status WHERE sync_relname = 'test_tablesample'; SELECT * FROM test_tablesample ORDER BY id limit 5; \c :provider_dsn \set VERBOSITY terse DROP FUNCTION funcn_get_system_sample_count(integer, integer); DROP FUNCTION funcn_get_bernoulli_sample_count(integer, integer); SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.test_tablesample CASCADE; $$); pglogical-REL2_4_1/sql/toasted.sql000066400000000000000000000161141415142317000171500ustar00rootroot00000000000000-- test toasted data SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn BEGIN; SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.toasted ( id serial primary key, other text, data text NOT NULL ); $$); SELECT * FROM pglogical.replication_set_add_table('default', 'toasted'); SELECT pglogical.replicate_ddl_command($$ ALTER TABLE public.toasted ALTER COLUMN data SET STORAGE EXTERNAL; $$); COMMIT; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); -- check replication of toast values INSERT INTO toasted(other, data) VALUES('foo', repeat('1234567890', 300)); -- check 
that unchanged toast values work correctly UPDATE toasted SET other = 'foo2'; -- check that changed toast values are replicated UPDATE toasted SET other = 'foo3', data = '-'||data; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM toasted ORDER BY id; \c :provider_dsn \copy toasted from stdin with csv 9000,aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,bar 9001,bbb,ccc 9002,ddd,eee 
9003,bar,aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 9004,fff,hhh \. 
SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM toasted ORDER BY id; \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.toasted CASCADE; $$); pglogical-REL2_4_1/sql/triggers.sql000066400000000000000000000100541415142317000173300ustar00rootroot00000000000000SELECT * FROM pglogical_regress_variables() \gset \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.test_trg_data(id serial primary key, data text); $$); SELECT * FROM pglogical.replication_set_add_table('default', 'test_trg_data'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn CREATE TABLE test_trg_hist(table_name text, action text, action_id serial, original_data text, new_data text); CREATE FUNCTION test_trg_data_hist_fn() RETURNS TRIGGER AS $$ BEGIN IF (TG_OP = 'UPDATE') THEN INSERT INTO test_trg_hist (table_name,action,original_data,new_data) VALUES (TG_TABLE_NAME::TEXT, substring(TG_OP,1,1), ROW(OLD.*), ROW(NEW.*)); RETURN NEW; ELSIF (TG_OP = 'DELETE') THEN INSERT INTO test_trg_hist (table_name,action,original_data) VALUES (TG_TABLE_NAME::TEXT, substring(TG_OP,1,1), ROW(OLD.*)); RETURN OLD; ELSIF (TG_OP = 'INSERT') THEN INSERT INTO test_trg_hist (table_name,action,new_data) VALUES (TG_TABLE_NAME::TEXT, substring(TG_OP,1,1), ROW(NEW.*)); RETURN NEW; ELSE RAISE WARNING 'Unknown action'; RETURN NULL; END IF; END; $$ LANGUAGE plpgsql; CREATE TRIGGER test_trg_data_hist_trg AFTER INSERT OR UPDATE OR DELETE ON test_trg_data FOR EACH ROW EXECUTE PROCEDURE test_trg_data_hist_fn(); \c :provider_dsn INSERT INTO test_trg_data(data) VALUES ('no_history'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM test_trg_data; SELECT * FROM test_trg_hist; ALTER TABLE test_trg_data ENABLE REPLICA TRIGGER test_trg_data_hist_trg; \c :provider_dsn INSERT INTO test_trg_data(data) VALUES ('yes_history'); UPDATE test_trg_data SET data = 'yes_history'; DELETE FROM 
test_trg_data; SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn SELECT * FROM test_trg_data; SELECT * FROM test_trg_hist; DROP TABLE test_trg_hist CASCADE; \c :provider_dsn SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.basic_dml ( id serial primary key, other integer, data text, something interval ); $$); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml'); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn -- create row filter trigger CREATE FUNCTION filter_basic_dml_fn() RETURNS TRIGGER AS $$ BEGIN IF (TG_OP in ('UPDATE', 'INSERT')) THEN -- treating 'DELETE' as pass-through IF (NEW.id > 1 AND NEW.data IS DISTINCT FROM 'baz' AND NEW.data IS DISTINCT FROM 'bbb') THEN RETURN NEW; ELSE RETURN NULL; END IF; ELSE RAISE WARNING 'Unknown action'; RETURN NULL; END IF; END; $$ LANGUAGE plpgsql; CREATE TRIGGER filter_basic_dml_trg BEFORE INSERT OR UPDATE ON basic_dml FOR EACH ROW EXECUTE PROCEDURE filter_basic_dml_fn(); \c :provider_dsn -- insert into table at provider \COPY basic_dml FROM STDIN WITH CSV 5000,1,aaa,1 hour 5001,2,bbb,2 years 5002,3,ccc,3 minutes 5003,4,ddd,4 days \. SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn -- rows received at suscriber as trigger is not enabled yet. SELECT * from basic_dml ORDER BY id; -- Now enable trigger: ALTER TABLE basic_dml ENABLE REPLICA TRIGGER filter_basic_dml_trg; \c :provider_dsn TRUNCATE basic_dml; -- check basic insert replication INSERT INTO basic_dml(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL); \c :subscriber_dsn -- rows filtered at suscriber as trigger is enabled. 
SELECT * from basic_dml ORDER BY id; \set VERBOSITY terse DROP FUNCTION test_trg_data_hist_fn() CASCADE; DROP FUNCTION filter_basic_dml_fn() CASCADE; \c :provider_dsn \set VERBOSITY terse SELECT pglogical.replicate_ddl_command($$ DROP TABLE public.test_trg_data CASCADE; DROP TABLE public.basic_dml CASCADE; $$); pglogical-REL2_4_1/t/000077500000000000000000000000001415142317000144255ustar00rootroot00000000000000pglogical-REL2_4_1/t/010_pglogical_create_subscriber.pl000077500000000000000000000123601415142317000230560ustar00rootroot00000000000000use strict; use warnings; use Cwd; use Config; use TestLib; use Test::More tests => 11; my $PGPORT=$ENV{'PGPORT'}; my $PROVIDER_PORT=5431; my $PROVIDER_DSN = "postgresql://super\@localhost:$PROVIDER_PORT/postgres"; my $SUBSCRIBER_DSN = "postgresql://super\@localhost:$PGPORT/postgres"; program_help_ok('pglogical_create_subscriber'); program_options_handling_ok('pglogical_create_subscriber'); system_or_bail 'rm', '-rf', '/tmp/tmp_datadir'; system_or_bail 'rm', '-rf', '/tmp/tmp_backupdir'; system_or_bail 'initdb', '-A trust', '-D', '/tmp/tmp_datadir'; system_or_bail 'pwd'; system_or_bail 'cp', 'regress-pg_hba.conf', '/tmp/tmp_datadir/pg_hba.conf'; my $pg_version = `pg_config --version| sed 's/[^0-9\.]//g' | awk -F . 
'{ print \$1\$2 }'`; if ($pg_version >= 95) { `cat t/perl-95-postgresql.conf>>/tmp/tmp_datadir/postgresql.conf`; } else { `cat t/perl-94-postgresql.conf>>/tmp/tmp_datadir/postgresql.conf`; } system("postgres -p $PROVIDER_PORT -D /tmp/tmp_datadir -c logging_collector=on &"); #allow Postgres server to startup system_or_bail 'sleep', '17'; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "CREATE USER super SUPERUSER"; # insert some pre-seed data system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "CREATE SEQUENCE some_local_seq"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "CREATE TABLE some_local_tbl(id serial primary key, key text unique not null, data text)"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "INSERT INTO some_local_tbl(key, data) VALUES('key1', 'data1')"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "INSERT INTO some_local_tbl(key, data) VALUES('key2', NULL)"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "INSERT INTO some_local_tbl(key, data) VALUES('key3', 'data3')"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "CREATE TABLE some_local_tbl1(id serial, key text unique not null, data text)"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "INSERT INTO some_local_tbl1(key, data) VALUES('key1', 'data1')"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "INSERT INTO some_local_tbl1(key, data) VALUES('key2', NULL)"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "INSERT INTO some_local_tbl1(key, data) VALUES('key3', 'data3')"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "CREATE TABLE some_local_tbl2(id serial, key text, data text)"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "INSERT INTO some_local_tbl2(key, data) VALUES('key1', 'data1')"; system_or_bail 'psql', 
'-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "INSERT INTO some_local_tbl2(key, data) VALUES('key2', NULL)"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "INSERT INTO some_local_tbl2(key, data) VALUES('key3', 'data3')"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "CREATE TABLE some_local_tbl3(id integer, key text, data text)"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "INSERT INTO some_local_tbl3(key, data) VALUES('key1', 'data1')"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "INSERT INTO some_local_tbl3(key, data) VALUES('key2', NULL)"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "INSERT INTO some_local_tbl3(key, data) VALUES('key3', 'data3')"; # Required for PostgreSQL 9.4 run #system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "CREATE EXTENSION IF NOT EXISTS pglogical_origin"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "CREATE EXTENSION IF NOT EXISTS pglogical VERSION '1.0.0'"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "ALTER EXTENSION pglogical UPDATE"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "SELECT * FROM pglogical.create_node(node_name := 'test_provider', dsn := 'dbname=postgres user=super')"; command_ok([ 'pglogical_create_subscriber', '-D', '/tmp/tmp_backupdir', "--subscriber-name=test_subscriber", "--subscriber-dsn=$SUBSCRIBER_DSN", "--provider-dsn=$PROVIDER_DSN", '--drop-slot-if-exists', '-v', '--hba-conf=regress-pg_hba.conf', '--postgresql-conf=/tmp/tmp_datadir/postgresql.conf'], 'pglogical_create_subscriber check'); #test whether preseed data is there command_ok([ 'psql', '-p', "$PGPORT", '-d', "postgres", '-c', "\\d" ], 'preseed check 1 '); command_ok([ 'psql', '-p', "$PGPORT", '-d', "postgres", '-c', "SELECT * from some_local_tbl" ], 'preseed check 2 '); #insert some new data system_or_bail 'psql', '-p', 
"$PROVIDER_PORT", '-d', "postgres", '-f', 't/basic.sql'; #allow script to complete executing system_or_bail 'sleep', '11'; #check whether it is replicated command_ok([ 'psql', '-p', "$PGPORT", '-d', "postgres", '-c', "\\d" ], 'replication check 1 '); command_ok([ 'psql', '-p', "$PGPORT", '-d', "postgres", '-c', "SELECT * from basic_dml" ], 'replication check 2 '); command_ok([ 'psql', '-p', "$PGPORT", '-d', "postgres", '-c', "SELECT * from public.basic_dml" ], 'replication check 3 '); #cleanup system("pg_ctl stop -D /tmp/tmp_backupdir -m immediate &"); system("pg_ctl stop -D /tmp/tmp_datadir -m immediate &"); pglogical-REL2_4_1/t/020_non_default_replication_set.pl000077500000000000000000000131501415142317000231100ustar00rootroot00000000000000use strict; use warnings; use Cwd; use Config; use TestLib; use Test::More tests => 1; my $PGPORT=65432; #subscriber's port my $PROVIDER_PORT=65431; my $PROVIDER_DSN = "postgresql://super\@localhost:$PROVIDER_PORT/postgres"; my $SUBSCRIBER_DSN = "postgresql://super\@localhost:$PGPORT/postgres"; system_or_bail 'rm', '-rf', '/tmp/tmp_020_pdatadir'; system_or_bail 'rm', '-rf', '/tmp/tmp_020_sdatadir'; #provider's and subscriber's datadir system_or_bail 'initdb', '-A trust', '-D', '/tmp/tmp_020_pdatadir'; system_or_bail 'initdb', '-A trust', '-D', '/tmp/tmp_020_sdatadir'; system_or_bail 'pwd'; system_or_bail 'cp', 'regress-pg_hba.conf', '/tmp/tmp_020_pdatadir/pg_hba.conf'; system_or_bail 'cp', 'regress-pg_hba.conf', '/tmp/tmp_020_sdatadir/pg_hba.conf'; my $pg_version = `pg_config --version| sed 's/[^0-9\.]//g' | awk -F . 
'{ print \$1\$2 }'`; if ($pg_version >= 95) { `cat t/perl-95-postgresql.conf>>/tmp/tmp_020_pdatadir/postgresql.conf`; `cat t/perl-95-postgresql.conf>>/tmp/tmp_020_sdatadir/postgresql.conf`; } else { `cat t/perl-94-postgresql.conf>>/tmp/tmp_020_pdatadir/postgresql.conf`; `cat t/perl-94-postgresql.conf>>/tmp/tmp_020_sdatadir/postgresql.conf`; } system("postgres -p $PROVIDER_PORT -D /tmp/tmp_020_pdatadir -c logging_collector=on &"); system("postgres -p $PGPORT -D /tmp/tmp_020_sdatadir -c logging_collector=on &"); #allow Postgres servers to startup system_or_bail 'sleep', '17'; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "CREATE USER super SUPERUSER"; system_or_bail 'psql', '-p', "$PGPORT", '-d', "postgres", '-c', "CREATE USER super SUPERUSER"; # Required for PostgreSQL 9.4 run #system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "CREATE EXTENSION IF NOT EXISTS pglogical_origin"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "CREATE EXTENSION IF NOT EXISTS pglogical VERSION '1.0.0'"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "ALTER EXTENSION pglogical UPDATE"; # Required for PostgreSQL 9.4 run #system_or_bail 'psql', '-p', "$PGPORT", '-d', "postgres", '-c', "CREATE EXTENSION IF NOT EXISTS pglogical_origin"; system_or_bail 'psql', '-p', "$PGPORT", '-d', "postgres", '-c', "CREATE EXTENSION IF NOT EXISTS pglogical"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "SELECT * FROM pglogical.create_node(node_name := 'test_provider', dsn := 'dbname=postgres user=super')"; system_or_bail 'psql', '-p', "$PGPORT", '-d', "postgres", '-c', "SELECT * FROM pglogical.create_node(node_name := 'test_subscriber', dsn := '$SUBSCRIBER_DSN')"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "SELECT * FROM pglogical.create_replication_set('delay')"; system_or_bail 'psql', '-p', "$PGPORT", '-d', "postgres", '-c', "SELECT * FROM 
pglogical.create_subscription( subscription_name := 'test_subscription_delay', provider_dsn := '$PROVIDER_DSN', replication_sets := '{delay}', forward_origins := '{}', synchronize_structure := false, synchronize_data := false )"; system_or_bail 'psql', '-p', "$PGPORT", '-d', "postgres", '-c', "DO \$\$ BEGIN FOR i IN 1..100 LOOP IF EXISTS (SELECT 1 FROM pglogical.show_subscription_status() WHERE status = 'replicating') THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; \$\$"; system_or_bail 'psql', '-p', "$PGPORT", '-d', "postgres", '-c', "SELECT subscription_name, status, provider_node, replication_sets, forward_origins FROM pglogical.show_subscription_status()"; system_or_bail 'psql', '-p', "$PGPORT", '-d', "postgres", '-c', "DO \$\$ BEGIN FOR i IN 1..300 LOOP IF EXISTS (SELECT 1 FROM pglogical.local_sync_status WHERE sync_status = 'r') THEN EXIT; END IF; PERFORM pg_sleep(0.1); END LOOP; END;\$\$"; system_or_bail 'psql', '-p', "$PGPORT", '-d', "postgres", '-c', "SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status FROM pglogical.local_sync_status ORDER BY 2,3,4"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "CREATE OR REPLACE FUNCTION public.pg_xlog_wait_remote_apply(i_pos pg_lsn, i_pid integer) RETURNS VOID AS \$FUNC\$ BEGIN WHILE EXISTS(SELECT true FROM pg_stat_get_wal_senders() s WHERE s.replay_location < i_pos AND (i_pid = 0 OR s.pid = i_pid)) LOOP PERFORM pg_sleep(0.01); END LOOP; END;\$FUNC\$ LANGUAGE plpgsql"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "SELECT pglogical.replicate_ddl_command(\$\$ CREATE TABLE public.basic_dml1 ( id serial primary key, other integer, data text, something interval ); \$\$)"; system_or_bail 'psql', '-p', "$PGPORT", '-d', "postgres", '-c', "select * from pglogical.show_subscription_status('test_subscription_delay');"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-d', "postgres", '-c', "SELECT * FROM pglogical.replication_set_add_table('delay', 
'basic_dml1', true) "; #At this point subscriber sync starts crashing (`sync worker`) and recovering # check logs at this point at: # /tmp/tmp_020_pdatadir and /tmp/tmp_020_sdatadir # As per Petr this is expected behavior. # But since the table does not exist on subscriber, the sync worker dies when trying # to accessing it. It even logs why it dies on the line above. system_or_bail 'sleep', '10'; command_fails([ 'psql', '-p', "$PGPORT", '-d', "postgres", '-c', "SELECT * FROM basic_dml1" ], 'replication check'); #cleanup system("pg_ctl stop -D /tmp/tmp_020_sdatadir -m immediate &"); system("pg_ctl stop -D /tmp/tmp_020_pdatadir -m immediate &"); pglogical-REL2_4_1/t/030_pglogical_daylight_times_switching.pl000077500000000000000000000231571415142317000244650ustar00rootroot00000000000000use strict; use warnings; use Cwd; use Config; use TestLib; use Test::More; #use Test::More tests => 6; # This is a special test, that tries to modify system time # Not required to run in usual suite tests TODO: { todo_skip 'Whole test need rewriting using the new framework, with no sudo etc', 1; my $PGPORT=65432; #subscriber's port my $PROVIDER_PORT=65431; my $PROVIDER_DSN = "postgresql://super\@localhost:$PROVIDER_PORT/postgres"; my $SUBSCRIBER_DSN = "postgresql://super\@localhost:$PGPORT/postgres"; #This test requires user to be a part of sudo group #for time and timezone changes #It is interactive now as it needs password to do sudo system_or_bail 'rm', '-rf', '/tmp/tmp_030_pdatadir'; system_or_bail 'rm', '-rf', '/tmp/tmp_030_sdatadir'; #bail out if ntp not installed - as we need it to set time back. system_or_bail 'sudo', 'sntp', '-s', '24.56.178.140'; my $timezone = `timedatectl |grep \'Timezone\'|cut -d ':' -f 2|cut -d ' ' -f 2|sed 's/ //g'`; #change timezone to before daylight savings border. 
command_ok([ 'timedatectl', 'set-timezone', 'America/Los_Angeles' ], 'pre-daylight savings time-zone check '); command_ok([ 'timedatectl', 'set-time', "2016-11-05 06:40:00" ], 'pre-daylight savings time check'); #sleep for a short while after this date change system_or_bail 'sleep', '10'; #provider's and subscriber's datadir system_or_bail 'initdb', '-A trust', '-D', '/tmp/tmp_030_pdatadir'; system_or_bail 'initdb', '-A trust', '-D', '/tmp/tmp_030_sdatadir'; system_or_bail 'pwd'; system_or_bail 'cp', 'regress-pg_hba.conf', '/tmp/tmp_030_pdatadir/pg_hba.conf'; system_or_bail 'cp', 'regress-pg_hba.conf', '/tmp/tmp_030_sdatadir/pg_hba.conf'; my $pg_version = `pg_config --version| sed 's/[^0-9\.]//g' | awk -F . '{ print \$1\$2 }'`; if ($pg_version >= 95) { `cat t/perl-95-postgresql.conf>>/tmp/tmp_030_pdatadir/postgresql.conf`; `cat t/perl-95-postgresql.conf>>/tmp/tmp_030_sdatadir/postgresql.conf`; } else { `cat t/perl-94-postgresql.conf>>/tmp/tmp_030_pdatadir/postgresql.conf`; `cat t/perl-94-postgresql.conf>>/tmp/tmp_030_sdatadir/postgresql.conf`; } system("postgres -p $PROVIDER_PORT -D /tmp/tmp_030_pdatadir -c logging_collector=on &"); system("postgres -p $PGPORT -D /tmp/tmp_030_sdatadir -c logging_collector=on &"); #allow Postgres servers to startup system_or_bail 'sleep', '17'; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "CREATE USER super SUPERUSER"; system_or_bail 'psql', '-p', "$PGPORT", '-c', "CREATE USER super SUPERUSER"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "CREATE EXTENSION IF NOT EXISTS pglogical VERSION '1.0.0'"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "ALTER EXTENSION pglogical UPDATE"; system_or_bail 'psql', '-p', "$PGPORT", '-c', "CREATE EXTENSION IF NOT EXISTS pglogical"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "SELECT * FROM pglogical.create_node(node_name := 'test_provider', dsn := 'dbname=postgres user=super')"; system_or_bail 'psql', '-p', "$PGPORT", '-c', "SELECT * FROM 
pglogical.create_node(node_name := 'test_subscriber', dsn := '$SUBSCRIBER_DSN')"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "SELECT * FROM pglogical.create_replication_set('delay')"; system_or_bail 'psql', '-p', "$PGPORT", '-c', "CREATE or REPLACE function int2interval (x integer) returns interval as \$\$ select \$1*'1 sec'::interval \$\$ language sql"; # create default subscription system_or_bail 'psql', '-p', "$PGPORT", '-c', "SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription', provider_dsn := '$PROVIDER_DSN', forward_origins := '{}', synchronize_structure := false, synchronize_data := false )"; # create delayed subscription too. system_or_bail 'psql', '-p', "$PGPORT", '-c', "SELECT * FROM pglogical.create_subscription( subscription_name := 'test_subscription_delay', provider_dsn := '$PROVIDER_DSN', replication_sets := '{delay}', forward_origins := '{}', synchronize_structure := false, synchronize_data := false, apply_delay := int2interval(1) -- 1 seconds )"; system_or_bail 'psql', '-p', "$PGPORT", '-c', "DO \$\$ BEGIN FOR i IN 1..100 LOOP IF EXISTS (SELECT 1 FROM pglogical.show_subscription_status() WHERE status = 'replicating' AND subscription_name = 'test_subscription_delay') THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; \$\$"; system_or_bail 'psql', '-p', "$PGPORT", '-c', "SELECT subscription_name, status, provider_node, replication_sets, forward_origins FROM pglogical.show_subscription_status()"; system_or_bail 'psql', '-p', "$PGPORT", '-c', "DO \$\$ BEGIN FOR i IN 1..300 LOOP IF EXISTS (SELECT 1 FROM pglogical.local_sync_status WHERE sync_status != 'r') THEN PERFORM pg_sleep(0.1); ELSE EXIT; END IF; END LOOP; END;\$\$"; system_or_bail 'psql', '-p', "$PGPORT", '-c', "SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status FROM pglogical.local_sync_status ORDER BY 2,3,4"; #change timezone to after daylight savings border. 
command_ok([ 'timedatectl', 'set-time', "2016-11-06 06:40:00" ], 'switching daylight savings time check'); # sleep for ~5 mins to allow both servers to recover system_or_bail 'sleep', '300'; system_or_bail 'psql', '-p', "$PGPORT", '-c', "DO \$\$ BEGIN FOR i IN 1..100 LOOP IF EXISTS (SELECT 1 FROM pglogical.show_subscription_status() WHERE status = 'replicating' AND subscription_name = 'test_subscription_delay') THEN RETURN; END IF; PERFORM pg_sleep(0.1); END LOOP; END; \$\$"; system_or_bail 'psql', '-p', "$PGPORT", '-c', "SELECT subscription_name, status, provider_node, replication_sets, forward_origins FROM pglogical.show_subscription_status()"; system_or_bail 'psql', '-p', "$PGPORT", '-c', "DO \$\$ BEGIN FOR i IN 1..300 LOOP IF EXISTS (SELECT 1 FROM pglogical.local_sync_status WHERE sync_status != 'r') THEN PERFORM pg_sleep(0.1); ELSE EXIT; END IF; END LOOP; END;\$\$"; system_or_bail 'psql', '-p', "$PGPORT", '-c', "SELECT sync_kind, sync_subid, sync_nspname, sync_relname, sync_status FROM pglogical.local_sync_status ORDER BY 2,3,4"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "CREATE OR REPLACE FUNCTION public.pg_xlog_wait_remote_apply(i_pos pg_lsn, i_pid integer) RETURNS VOID AS \$FUNC\$ BEGIN WHILE EXISTS(SELECT true FROM pg_stat_get_wal_senders() s WHERE s.replay_location < i_pos AND (i_pid = 0 OR s.pid = i_pid)) LOOP PERFORM pg_sleep(0.01); END LOOP; END;\$FUNC\$ LANGUAGE plpgsql"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "CREATE TABLE public.timestamps ( id text primary key, ts timestamptz )"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "SELECT pglogical.replicate_ddl_command(\$\$ CREATE TABLE public.basic_dml1 ( id serial primary key, other integer, data text, something interval ); \$\$)"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "SELECT pg_xlog_wait_remote_apply(pg_current_xlog_location(), 0)"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "INSERT INTO timestamps VALUES ('ts1', CURRENT_TIMESTAMP)"; 
system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "SELECT * FROM pglogical.replication_set_add_table('delay', 'basic_dml1', true) "; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "SELECT pg_xlog_wait_remote_apply(pg_current_xlog_location(), 0)"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "INSERT INTO timestamps VALUES ('ts2', CURRENT_TIMESTAMP)"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "INSERT INTO basic_dml1(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL)"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "SELECT pg_xlog_wait_remote_apply(pg_current_xlog_location(), 0)"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "INSERT INTO timestamps VALUES ('ts3', CURRENT_TIMESTAMP)"; system_or_bail 'psql', '-p', "$PGPORT", '-c', "select * from pglogical.show_subscription_status('test_subscription_delay');"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "SELECT round (EXTRACT(EPOCH FROM (SELECT ts from timestamps where id = 'ts2')) - EXTRACT(EPOCH FROM (SELECT ts from timestamps where id = 'ts1'))) :: integer as ddl_replicate_time"; system_or_bail 'psql', '-p', "$PROVIDER_PORT", '-c', "SELECT round (EXTRACT(EPOCH FROM (SELECT ts from timestamps where id = 'ts3')) - EXTRACT(EPOCH FROM (SELECT ts from timestamps where id = 'ts2'))) :: integer as inserts_replicate_time"; command_ok([ 'psql', '-p', "$PROVIDER_PORT", '-c', "SELECT * FROM basic_dml1" ], 'provider data check'); system_or_bail 'psql', '-p', "$PGPORT", '-c', "select * from pglogical.show_subscription_status('test_subscription_delay');"; system_or_bail 'psql', '-p', "$PGPORT", '-c', "SELECT * FROM pglogical.show_subscription_table('test_subscription_delay', 'basic_dml1')"; #check the data of table at subscriber command_ok([ 'psql', '-p', "$PGPORT", '-c', "SELECT * FROM basic_dml1" ], 'replication check'); system_or_bail 
'psql', '-p', "$PGPORT", '-c', "SELECT pglogical.drop_subscription('test_subscription_delay')"; #cleanup system("pg_ctl stop -D /tmp/tmp_030_sdatadir -m immediate &"); system("pg_ctl stop -D /tmp/tmp_030_pdatadir -m immediate &"); # change time and timezone back to normal: command_ok([ 'sudo', 'sntp', '-s', '24.56.178.140' ], 'sync time with ntp server check'); system("timedatectl set-timezone $timezone"); } done_testing(); pglogical-REL2_4_1/t/040_pglogical_sync_during_write.pl000066400000000000000000000256251415142317000231360ustar00rootroot00000000000000# # Test for RT#60889, trying to reproduce an issue where sync fails during # catchup with a walsender timeout for as-yet-unknown reasons. # use strict; use warnings; use PostgresNode; use TestLib; use Data::Dumper; use Test::More; use Time::HiRes; use Carp; my $PGBENCH_SCALE = $ENV{PGBENCH_SCALE} // 1; my $PGBENCH_CLIENTS = $ENV{PGBENCH_CLIENTS} // 10; my $PGBENCH_JOBS = $ENV{PGBENCH_JOBS} // 1; my $PGBENCH_TIME = $ENV{PGBENCH_TIME} // 120; my $WALSENDER_TIMEOUT = $ENV{PGBENCH_TIMEOUT} // '5s'; $SIG{__DIE__} = sub { Carp::confess @_ }; $SIG{INT} = sub { die("interupted by SIGINT"); }; my $dbname="pgltest"; my $super_user="super"; my $node_provider = get_new_node('provider'); $node_provider->init(); $node_provider->append_conf('postgresql.conf', qq[ wal_level = 'logical' max_replication_slots = 12 max_wal_senders = 12 wal_sender_timeout = '$WALSENDER_TIMEOUT' max_connections = 200 log_line_prefix = '%t %p ' shared_preload_libraries = 'pglogical' track_commit_timestamp = on pglogical.synchronous_commit = true ]); $node_provider->dump_info; $node_provider->start; $node_provider->safe_psql('postgres', "CREATE DATABASE $dbname"); my $node_subscriber = get_new_node('subscriber'); $node_subscriber->init(); $node_subscriber->append_conf('postgresql.conf', qq[ shared_preload_libraries = 'pglogical' wal_level = logical max_wal_senders = 10 max_replication_slots = 10 track_commit_timestamp = on fsync=off 
pglogical.synchronous_commit = true log_line_prefix = '%t %p ' ]); $node_subscriber->dump_info; $node_subscriber->start; $node_subscriber->safe_psql('postgres', "CREATE DATABASE $dbname"); # Create provider node on master: $node_provider->safe_psql($dbname, "CREATE USER $super_user SUPERUSER;"); $node_provider->safe_psql($dbname, "CREATE EXTENSION IF NOT EXISTS pglogical VERSION '1.0.0';"); $node_provider->safe_psql($dbname, "ALTER EXTENSION pglogical UPDATE;"); my $provider_connstr = $node_provider->connstr; print "node_provider - connstr : $provider_connstr\n"; $node_provider->safe_psql($dbname, "SELECT * FROM pglogical.create_node(node_name := 'test_provider', dsn := '$provider_connstr dbname=$dbname user=$super_user');"); # Create subscriber node on subscriber: $node_subscriber->safe_psql($dbname, "CREATE USER $super_user SUPERUSER;"); $node_subscriber->safe_psql($dbname, "CREATE EXTENSION IF NOT EXISTS pglogical VERSION '1.0.0';"); $node_subscriber->safe_psql($dbname, "ALTER EXTENSION pglogical UPDATE;"); my $subscriber_connstr = $node_subscriber->connstr; print "node_subscriber - connstr : $subscriber_connstr\n"; $node_subscriber->safe_psql($dbname, "SELECT * FROM pglogical.create_node(node_name := 'test_subscriber', dsn := '$subscriber_connstr dbname=$dbname user=$super_user');"); # Initialise pgbench on provider and print initial data count in tables $node_provider->command_ok([ 'pgbench', '-i', '-s', $PGBENCH_SCALE, $dbname], 'initialize pgbench'); my @pgbench_tables = ('pgbench_accounts', 'pgbench_tellers', 'pgbench_history'); # Add pgbench tables to repset for my $tbl (@pgbench_tables) { my $setname = 'default'; $setname = 'default_insert_only' if ($tbl eq 'pgbench_history'); $node_provider->safe_psql($dbname, "SELECT * FROM pglogical.replication_set_add_table('$setname', '$tbl', false);"); } $node_subscriber->safe_psql($dbname, "SELECT pglogical.create_subscription( subscription_name := 'test_subscription', synchronize_structure := true, 
synchronize_data := true, provider_dsn := '$provider_connstr dbname=$dbname user=$super_user' );"); $node_subscriber->poll_query_until($dbname, q[SELECT EXISTS (SELECT 1 FROM pglogical.show_subscription_status() where subscription_name = 'test_subscription' AND status = 'replicating')]) or BAIL_OUT('subscription failed to reach "replicating" state'); # Make write-load active on the tables pgbench_history # with this TPC-B-ish run. Run it in the background. diag "provider is" . $node_provider->name; diag "max_connections is " . $node_provider->safe_psql($dbname, 'SHOW max_connections;'); my $pgbench_stdout=''; my $pgbench_stderr=''; my $pgbench_handle = IPC::Run::start( [ 'pgbench', '-T', $PGBENCH_TIME, '-j', $PGBENCH_JOBS, '-s', $PGBENCH_SCALE, '-c', $PGBENCH_CLIENTS, $node_provider->connstr($dbname)], '>', \$pgbench_stdout, '2>', \$pgbench_stderr); $pgbench_handle->pump(); # Wait for pgbench to connect $node_provider->poll_query_until($dbname, q[SELECT EXISTS (SELECT 1 FROM pg_stat_activity WHERE query like 'UPDATE pgbench%')]) or BAIL_OUT('pgbench process is not running currently'); $node_provider->safe_psql($dbname, q[ALTER SYSTEM SET log_statement = 'ddl']); $node_provider->safe_psql($dbname, q[SELECT pg_reload_conf();]); # Let it warm up for a while note "warming up pgbench for " . ($PGBENCH_TIME/10) . "s"; sleep($PGBENCH_TIME/10); note "done warmup"; open(my $publog, "<", $node_provider->logfile) or die "can't open log file for provider at " . $node_provider->logfile . ": $!"; open(my $sublog, "<", $node_subscriber->logfile) or die "can't open log file for subscriber at " . $node_subscriber->logfile . 
": $!"; my $walsender_pid = int($node_provider->safe_psql($dbname, q[SELECT pid FROM pg_stat_activity WHERE application_name = 'test_subscription'])); my $apply_pid = int($node_subscriber->safe_psql($dbname, q[SELECT pid FROM pg_stat_activity WHERE application_name LIKE '%apply%'])); note "wal sender pid is $walsender_pid; apply worker pid is $apply_pid"; # Seek to log EOF seek($publog, 2, 0); seek($sublog, 2, 0); my $i = 1; do { # Resync all the tables in turn EACH_TABLE: for my $tbl (@pgbench_tables) { my $publogpos = tell($publog); my $sublogpos = tell($sublog); my $resync_start = [Time::HiRes::gettimeofday()]; eval { $node_subscriber->safe_psql($dbname, "SELECT * FROM pglogical.alter_subscription_resynchronize_table('test_subscription', '$tbl');"); }; if ($@) { diag "attempt to resync $tbl failed with $@; sync_status is currently " . $node_subscriber->safe_psql($dbname, "SELECT sync_status FROM pglogical.local_sync_status WHERE sync_relname = '$tbl'"); fail("$tbl didn't sync: resync request failed"); next EACH_TABLE; } while (1) { Time::HiRes::usleep(100); my $running = $node_subscriber->safe_psql($dbname, qq[SELECT pid FROM pg_stat_activity WHERE application_name LIKE '%sync%']); my $status = $node_subscriber->safe_psql($dbname, qq[SELECT sync_status FROM pglogical.local_sync_status WHERE sync_relname = '$tbl']); if ($status eq 'r') { pass("$tbl synced on iteration $i (elapsed " . Time::HiRes::tv_interval($resync_start) . ")" ); last; } elsif ($status eq 'i') { # worker still starting } elsif ($status eq 'y') { # worker done but master hasn't noticed yet # keep looping until master notices and switches to 'r' } elsif (!$running) { fail("$tbl didn't sync on iteration $i, sync worker exited (running=$running) while sync state was '$status' (elapsed " . Time::HiRes::tv_interval($resync_start) . 
")" ); } } # look for walsender timeouts in logs since last test # We must seek to reset any prior eof marker seek($publog, 0, $publogpos); seek($sublog, 0, $sublogpos); # then look for log lines of interest my $timeout_line; my $finished_sync_line; while (my $line = <$publog>) { if ($line =~ qr/replication timeout/ && !$timeout_line) { $timeout_line = $line; diag "status line after failed sync is " . $node_subscriber->safe_psql($dbname, qq[SELECT * FROM pglogical.local_sync_status WHERE sync_relname = '$tbl']); if ($line =~ qr/\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [A-Z]+ (\d+) LOG:/) { if (int($1) == $walsender_pid) { diag "terminated walsender was the main walsender for the apply worker, trying to make apply core of pid $apply_pid"; system("gcore -o tmp_check/core.$apply_pid $apply_pid") and diag "core saved as tmp_check/core.$apply_pid" or diag "couldn't save core, see logs"; } else { diag "terminated walsender was not for apply worker, looking for a sync worker"; my $sync_pid = int($node_subscriber->safe_psql($dbname, q[SELECT pid FROM pg_stat_activity WHERE application_name LIKE '%sync%'])); if ($sync_pid) { diag "found running sync worker $sync_pid, trying to make core"; system("gcore $sync_pid"); } else { diag "no sync worker found running"; } } } else { carp "couldn't match line format for $line"; } } } while (my $line = <$sublog>) { if ($line =~ qr/finished sync of table/ && !$finished_sync_line) { $finished_sync_line = $line; } } # This test is racey, because we don't emit this message until # after the sync commits so we quite possibly won't see it here # after we finish waiting for synced state. 
# #isnt($finished_sync_line, undef, "found finished sync line in last test logs"); is($timeout_line, undef, "no walsender timeout since last test"); } $i ++; # and repeat until pgbench exits } while ($node_provider->safe_psql($dbname, q[SELECT 1 FROM pg_stat_activity WHERE application_name = 'pgbench'])); $pgbench_handle->finish; note "##### output of pgbench #####"; note $pgbench_stdout; note "##### end of output #####"; is($pgbench_handle->full_result(0), 0, "pgbench run successfull "); note " waiting for catchup"; # Wait for catchup $node_provider->safe_psql($dbname, 'SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL);'); note "comparing tables"; # Compare final table entries on provider and subscriber. for my $tbl (@pgbench_tables) { my $rowcount_provider = $node_provider->safe_psql($dbname, "SELECT count(*) FROM $tbl;"); my $rowcount_subscriber = $node_subscriber->safe_psql($dbname, "SELECT count(*) FROM $tbl;"); my $matched = is($rowcount_subscriber, $rowcount_provider, "final $tbl row counts match after sync"); if (!$matched) { diag "final provider rowcount for $tbl is $rowcount_provider, but subscriber has $rowcount_subscriber"; my $sortkey; if ($tbl == "pgbench_history") { $sortkey = "1, 2, 3, 4"; } elsif ($tbl == "pgbench_tellers" || $tbl == "pgbench_accounts") { $sortkey = "1, 2"; } elsif ($tbl == "pgbench_branches") { $sortkey = "1"; } # Compare the tables $node_provider->safe_psql($dbname, qq[\\copy (SELECT * FROM $tbl ORDER BY $sortkey) to tmp_check/$tbl-provider]); $node_subscriber->safe_psql($dbname, qq[\\copy (SELECT * FROM $tbl ORDER BY $sortkey) to tmp_check/$tbl-subscriber]); $node_subscriber->safe_psql($dbname, qq[\\copy (SELECT * FROM $tbl, pglogical.xact_commit_timestamp_origin($tbl.xmin) ORDER BY $sortkey) to tmp_check/$tbl-subscriber-detail]); IPC::Run::run(['diff', '-u', "tmp_check/$tbl-provider", "tmp_check/$tbl-subscriber"], '>', "tmp_check/$tbl-diff"); diag "differences between $tbl on provider and subscriber recorded in tmp_check/"; } 
} $node_subscriber->teardown_node; $node_provider->teardown_node; done_testing(); pglogical-REL2_4_1/t/basic.sql000066400000000000000000000020671415142317000162340ustar00rootroot00000000000000-- basic builtin datatypes CREATE OR REPLACE FUNCTION public.pg_xlog_wait_remote_apply(i_pos pg_lsn, i_pid integer) RETURNS VOID AS $FUNC$ BEGIN WHILE EXISTS(SELECT true FROM pg_stat_get_wal_senders() s WHERE s.replay_location < i_pos AND (i_pid = 0 OR s.pid = i_pid)) LOOP PERFORM pg_sleep(0.01); END LOOP; END;$FUNC$ LANGUAGE plpgsql; SELECT pglogical.replicate_ddl_command($$ CREATE TABLE public.basic_dml ( id serial primary key, other integer, data text, something interval ); $$); SELECT * FROM pglogical.replication_set_add_table('default', 'basic_dml'); SELECT pg_xlog_wait_remote_apply(pg_current_xlog_location(), 0); -- check basic insert replication INSERT INTO basic_dml(other, data, something) VALUES (5, 'foo', '1 minute'::interval), (4, 'bar', '12 weeks'::interval), (3, 'baz', '2 years 1 hour'::interval), (2, 'qux', '8 months 2 days'::interval), (1, NULL, NULL); SELECT pg_xlog_wait_remote_apply(pg_current_xlog_location(), 0); SELECT id, other, data, something FROM basic_dml ORDER BY id; pglogical-REL2_4_1/t/perl-94-postgresql.conf000066400000000000000000000004441415142317000206730ustar00rootroot00000000000000# Configuration that affects behaviour being tested: shared_preload_libraries = 'pglogical' wal_level = logical max_wal_senders = 10 max_replication_slots = 10 # Purely testing related: DateStyle = 'ISO, DMY' log_line_prefix='[%m] [%p] [%d] ' fsync=off pglogical.synchronous_commit = true pglogical-REL2_4_1/t/perl-95-postgresql.conf000066400000000000000000000005261415142317000206750ustar00rootroot00000000000000# Configuration that affects behaviour being tested: shared_preload_libraries = 'pglogical' wal_level = logical max_wal_senders = 10 max_replication_slots = 10 track_commit_timestamp = on # Purely testing related: DateStyle = 'ISO, DMY' log_line_prefix='[%m] [%p] 
[%d] ' fsync=off log_statement = 'all' pglogical.synchronous_commit = true